From 9d01553b0fcc57089a85de4204f3ffe054f1a68b Mon Sep 17 00:00:00 2001 From: Aleksander Zaruczewski Date: Tue, 31 Oct 2023 03:33:45 -0400 Subject: [PATCH] chore(userconfig): improve naming and docs (#1420) --- CHANGELOG.md | 5 +- Makefile | 39 +- docs/data-sources/cassandra.md | 6 +- docs/data-sources/clickhouse.md | 6 +- docs/data-sources/flink.md | 4 +- docs/data-sources/grafana.md | 24 +- docs/data-sources/influxdb.md | 6 +- docs/data-sources/kafka.md | 6 +- docs/data-sources/kafka_connect.md | 6 +- docs/data-sources/kafka_mirrormaker.md | 6 +- docs/data-sources/m3aggregator.md | 4 +- docs/data-sources/m3db.md | 12 +- docs/data-sources/mysql.md | 6 +- docs/data-sources/opensearch.md | 8 +- docs/data-sources/pg.md | 8 +- docs/data-sources/redis.md | 6 +- docs/resources/cassandra.md | 14 +- docs/resources/clickhouse.md | 14 +- docs/resources/flink.md | 8 +- docs/resources/grafana.md | 48 +- docs/resources/influxdb.md | 16 +- docs/resources/kafka.md | 28 +- docs/resources/kafka_connect.md | 16 +- docs/resources/kafka_mirrormaker.md | 10 +- docs/resources/m3aggregator.md | 8 +- docs/resources/m3db.md | 48 +- docs/resources/mysql.md | 18 +- docs/resources/opensearch.md | 34 +- docs/resources/pg.md | 30 +- docs/resources/redis.md | 16 +- go.mod | 4 + go.sum | 7 + internal/schemautil/converters.go | 426 ++++++++++++ internal/schemautil/service.go | 40 +- .../userconfig/apiconvert/fromapi.go | 252 +++---- .../userconfig/apiconvert/fromapi_test.go | 202 +++--- .../schemautil/userconfig/apiconvert/toapi.go | 437 ++++++------ .../userconfig/apiconvert/toapi_test.go | 560 ++++++++-------- .../schemautil/userconfig/apiconvert/util.go | 33 +- .../userconfig/apiconvert/util_test.go | 50 +- internal/schemautil/userconfig/convert.go | 78 ++- internal/schemautil/userconfig/desc.go | 114 ++-- .../dist/integration_endpoint_types.go | 3 +- .../userconfig/dist/integration_types.go | 3 +- .../userconfig/dist/service_types.go | 3 +- internal/schemautil/userconfig/handle.go | 241 
++++--- .../typeupgrader/typeupgrader.go | 40 +- .../typeupgrader/typeupgrader_test.go | 84 +-- .../stateupgrader/v0/cassandra/cassandra.go | 23 +- .../stateupgrader/v0/flink/flink.go | 14 +- .../stateupgrader/v0/grafana/grafana.go | 48 +- .../stateupgrader/v0/influxdb/influxdb.go | 26 +- .../stateupgrader/v0/kafka/kafka.go | 44 +- .../stateupgrader/v0/kafka/kafka_connect.go | 26 +- .../v0/kafka/kafka_mirrormaker.go | 14 +- .../stateupgrader/v0/kafka/kafka_topic.go | 10 +- .../stateupgrader/v0/m3/m3aggregator.go | 10 +- .../userconfig/stateupgrader/v0/m3/m3db.go | 39 +- .../stateupgrader/v0/mysql/mysql.go | 30 +- .../stateupgrader/v0/opensearch/opensearch.go | 38 +- .../userconfig/stateupgrader/v0/pg/pg.go | 42 +- .../stateupgrader/v0/redis/redis.go | 26 +- .../serviceintegration/service_integration.go | 42 +- .../service_integration_endpoint.go | 30 +- .../schemautil/userconfig/userconfig_test.go | 76 ++- internal/schemautil/userconfig/util.go | 222 +++--- .../service/cassandra/cassandra.go | 5 +- .../service/clickhouse/clickhouse.go | 5 +- internal/sdkprovider/service/flink/flink.go | 5 +- .../sdkprovider/service/grafana/grafana.go | 5 +- .../service/grafana/grafana_test.go | 62 ++ .../sdkprovider/service/influxdb/influxdb.go | 5 +- internal/sdkprovider/service/kafka/kafka.go | 13 +- .../service/kafka/kafka_connect.go | 5 +- .../service/kafka/kafka_mirrormaker.go | 5 +- .../sdkprovider/service/kafka/kafka_test.go | 10 +- .../sdkprovider/service/m3db/m3aggregator.go | 5 +- internal/sdkprovider/service/m3db/m3db.go | 5 +- internal/sdkprovider/service/mysql/mysql.go | 5 +- .../service/opensearch/opensearch.go | 5 +- internal/sdkprovider/service/pg/pg.go | 10 +- internal/sdkprovider/service/redis/redis.go | 5 +- .../userconfig/service/cassandra.go | 159 +++++ .../userconfig/service/clickhouse.go | 154 +++++ .../sdkprovider/userconfig/service/flink.go | 90 +++ .../sdkprovider/userconfig/service/grafana.go | 617 +++++++++++++++++ .../userconfig/service/influxdb.go | 
171 +++++ .../sdkprovider/userconfig/service/kafka.go | 608 +++++++++++++++++ .../userconfig/service/kafka_connect.go | 227 +++++++ .../userconfig/service/kafka_mirrormaker.go | 132 ++++ .../userconfig/service/m3aggregator.go | 83 +++ .../sdkprovider/userconfig/service/m3db.go | 366 ++++++++++ .../sdkprovider/userconfig/service/mysql.go | 407 +++++++++++ .../userconfig/service/opensearch.go | 547 +++++++++++++++ internal/sdkprovider/userconfig/service/pg.go | 630 ++++++++++++++++++ .../sdkprovider/userconfig/service/redis.go | 261 ++++++++ .../sdkprovider/userconfig/service/service.go | 99 +++ main.go | 1 + ucgenerator/main.go | 406 +++++++++++ ucgenerator/models.go | 283 ++++++++ 100 files changed, 7465 insertions(+), 1728 deletions(-) create mode 100644 internal/schemautil/converters.go create mode 100644 internal/sdkprovider/userconfig/service/cassandra.go create mode 100644 internal/sdkprovider/userconfig/service/clickhouse.go create mode 100644 internal/sdkprovider/userconfig/service/flink.go create mode 100644 internal/sdkprovider/userconfig/service/grafana.go create mode 100644 internal/sdkprovider/userconfig/service/influxdb.go create mode 100644 internal/sdkprovider/userconfig/service/kafka.go create mode 100644 internal/sdkprovider/userconfig/service/kafka_connect.go create mode 100644 internal/sdkprovider/userconfig/service/kafka_mirrormaker.go create mode 100644 internal/sdkprovider/userconfig/service/m3aggregator.go create mode 100644 internal/sdkprovider/userconfig/service/m3db.go create mode 100644 internal/sdkprovider/userconfig/service/mysql.go create mode 100644 internal/sdkprovider/userconfig/service/opensearch.go create mode 100644 internal/sdkprovider/userconfig/service/pg.go create mode 100644 internal/sdkprovider/userconfig/service/redis.go create mode 100644 internal/sdkprovider/userconfig/service/service.go create mode 100644 ucgenerator/main.go create mode 100644 ucgenerator/models.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 
2a25f01f5..c54480e4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,10 @@ nav_order: 1 # Changelog -## [X.Y.Z] - YYYY-MM-DD +## [4.10.0] - YYYY-MM-DD + +- Add new user config generator +- Use `TypeSet` for arrays with scalar values ## [4.9.3] - 2023-10-27 diff --git a/Makefile b/Makefile index 45d16dee8..3d991219a 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: build build-dev test test-unit test-acc test-examples lint lint-go lint-test lint-docs fmt fmt-test docs clean clean-tools sweep go-generate generate imports +.PHONY: build build-dev test test-unit test-acc test-examples lint lint-go lint-test lint-docs fmt fmt-test fmt-imports clean clean-tools clean-examples sweep generate gen-go docs ################################################# # Global @@ -83,8 +83,6 @@ test-acc: TF_ACC=1 $(GO) test ./$(PKG_PATH)/... \ -v -count $(TEST_COUNT) -parallel $(ACC_TEST_PARALLELISM) $(RUNARGS) $(TESTARGS) -timeout $(ACC_TEST_TIMEOUT) -clean-examples: - find ./examples -type f -name '*.tfstate*' -delete test-examples: build-dev clean-examples AIVEN_PROVIDER_PATH=$(BUILD_DEV_DIR) $(GO) test --tags=examples ./examples_tests/... \ @@ -112,42 +110,51 @@ lint-docs: $(TFPLUGINDOCS) # Format ################################################# -fmt: imports fmt-test +fmt: fmt-test fmt-imports + fmt-test: $(TERRAFMT) $(TERRAFMT) fmt ./internal -fv -# On MACOS requires gnu-sed. Run `brew info gnu-sed` and follow instructions to replace default sed. -imports: + +# macOS requires to install GNU sed first. Use `brew install gnu-sed` to install it. +# It has to be added to PATH as `sed` command, to replace default BSD sed. +# See `brew info gnu-sed` for more details on how to add it to PATH. +fmt-imports: find . -type f -name '*.go' -exec sed -zi 's/"\n\+\t"/"\n"/g' {} + goimports -local "github.com/aiven/terraform-provider-aiven" -w . 
-################################################# -# Docs -################################################# - -docs: $(TFPLUGINDOCS) - $(TFPLUGINDOCS) generate - ################################################# # Clean ################################################# -clean: clean-tools sweep +clean: clean-tools clean-examples sweep clean-tools: $(TOOLS_BIN_DIR) rm -rf $(TOOLS_BIN_DIR) +clean-examples: + find ./examples -type f -name '*.tfstate*' -delete + + SWEEP ?= global sweep: @echo 'WARNING: This will destroy infrastructure. Use only in development accounts.' $(GO) test ./internal/sweep -v -tags=sweep -sweep=$(SWEEP) $(SWEEP_ARGS) -timeout 15m -go-generate: +################################################# +# Generate +################################################# + +generate: gen-go fmt-imports docs + + +gen-go: go generate ./... -generate: go-generate docs +docs: $(TFPLUGINDOCS) + $(TFPLUGINDOCS) generate diff --git a/docs/data-sources/cassandra.md b/docs/data-sources/cassandra.md index 2442962ab..957f0d950 100644 --- a/docs/data-sources/cassandra.md +++ b/docs/data-sources/cassandra.md @@ -68,14 +68,14 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `backup_hour` (Number) - `backup_minute` (Number) - `cassandra` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--cassandra)) - `cassandra_version` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `migrate_sstableloader` (Boolean) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--cassandra_user_config--private_access)) - `project_to_fork_from` (String) diff --git a/docs/data-sources/clickhouse.md b/docs/data-sources/clickhouse.md index 
1451534f0..2b9c89ea6 100644 --- a/docs/data-sources/clickhouse.md +++ b/docs/data-sources/clickhouse.md @@ -68,10 +68,10 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) -- `ip_filter` (List of String) +- `additional_backup_regions` (Set of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--private_access)) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_user_config--privatelink_access)) - `project_to_fork_from` (String) diff --git a/docs/data-sources/flink.md b/docs/data-sources/flink.md index 39ede94ce..8e888dc8b 100644 --- a/docs/data-sources/flink.md +++ b/docs/data-sources/flink.md @@ -84,9 +84,9 @@ Read-Only: Read-Only: - `flink_version` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `number_of_task_slots` (Number) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--privatelink_access)) diff --git a/docs/data-sources/grafana.md b/docs/data-sources/grafana.md index 75f8faf5c..427848fc6 100644 --- a/docs/data-sources/grafana.md +++ b/docs/data-sources/grafana.md @@ -82,7 +82,7 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `alerting_enabled` (Boolean) - `alerting_error_or_timeout` (String) - `alerting_max_annotations_to_keep` (Number) @@ -106,9 +106,9 @@ Read-Only: - `editors_can_admin` (Boolean) - `external_image_storage` (List of Object) (see 
[below for nested schema](#nestedobjatt--grafana_user_config--external_image_storage)) - `google_analytics_ua_id` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `metrics_enabled` (Boolean) - `oauth_allow_insecure_email_lookup` (Boolean) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--private_access)) @@ -129,8 +129,8 @@ Read-Only: Read-Only: - `allow_sign_up` (Boolean) -- `allowed_domains` (List of String) -- `allowed_groups` (List of String) +- `allowed_domains` (Set of String) +- `allowed_groups` (Set of String) - `auth_url` (String) - `client_id` (String) - `client_secret` (String) @@ -143,15 +143,15 @@ Read-Only: Read-Only: - `allow_sign_up` (Boolean) -- `allowed_domains` (List of String) -- `allowed_organizations` (List of String) +- `allowed_domains` (Set of String) +- `allowed_organizations` (Set of String) - `api_url` (String) - `auth_url` (String) - `auto_login` (Boolean) - `client_id` (String) - `client_secret` (String) - `name` (String) -- `scopes` (List of String) +- `scopes` (Set of String) - `token_url` (String) @@ -161,10 +161,10 @@ Read-Only: Read-Only: - `allow_sign_up` (Boolean) -- `allowed_organizations` (List of String) +- `allowed_organizations` (Set of String) - `client_id` (String) - `client_secret` (String) -- `team_ids` (List of Number) +- `team_ids` (Set of Number) @@ -173,7 +173,7 @@ Read-Only: Read-Only: - `allow_sign_up` (Boolean) -- `allowed_groups` (List of String) +- `allowed_groups` (Set of String) - `api_url` (String) - `auth_url` (String) - `client_id` (String) @@ -187,7 +187,7 @@ Read-Only: Read-Only: - `allow_sign_up` (Boolean) -- `allowed_domains` (List of String) +- `allowed_domains` (Set of String) - `client_id` (String) - `client_secret` (String) 
diff --git a/docs/data-sources/influxdb.md b/docs/data-sources/influxdb.md index 7c198c34f..51f5bee5b 100644 --- a/docs/data-sources/influxdb.md +++ b/docs/data-sources/influxdb.md @@ -83,12 +83,12 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `custom_domain` (String) - `influxdb` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--influxdb)) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--private_access)) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--influxdb_user_config--privatelink_access)) - `project_to_fork_from` (String) diff --git a/docs/data-sources/kafka.md b/docs/data-sources/kafka.md index d161e591a..74f6673ac 100644 --- a/docs/data-sources/kafka.md +++ b/docs/data-sources/kafka.md @@ -89,11 +89,11 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `custom_domain` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `kafka` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--kafka)) - `kafka_authentication_methods` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--kafka_authentication_methods)) - `kafka_connect` (Boolean) diff --git a/docs/data-sources/kafka_connect.md b/docs/data-sources/kafka_connect.md index 2d774a226..c59085ac0 100644 --- 
a/docs/data-sources/kafka_connect.md +++ b/docs/data-sources/kafka_connect.md @@ -82,10 +82,10 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) -- `ip_filter` (List of String) +- `additional_backup_regions` (Set of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `kafka_connect` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--kafka_connect)) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--private_access)) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--privatelink_access)) diff --git a/docs/data-sources/kafka_mirrormaker.md b/docs/data-sources/kafka_mirrormaker.md index 23fa4440f..4a8e3e804 100644 --- a/docs/data-sources/kafka_mirrormaker.md +++ b/docs/data-sources/kafka_mirrormaker.md @@ -82,10 +82,10 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) -- `ip_filter` (List of String) +- `additional_backup_regions` (Set of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `kafka_mirrormaker` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--kafka_mirrormaker)) - `static_ips` (Boolean) diff --git a/docs/data-sources/m3aggregator.md b/docs/data-sources/m3aggregator.md index 24e58377a..bec2779ed 100644 --- a/docs/data-sources/m3aggregator.md +++ b/docs/data-sources/m3aggregator.md @@ -83,9 +83,9 @@ Read-Only: Read-Only: - `custom_domain` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - 
`ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3aggregator_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `m3_version` (String) - `m3aggregator_version` (String) - `static_ips` (Boolean) diff --git a/docs/data-sources/m3db.md b/docs/data-sources/m3db.md index 2af4137fb..850687d96 100644 --- a/docs/data-sources/m3db.md +++ b/docs/data-sources/m3db.md @@ -82,11 +82,11 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `custom_domain` (String) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `limits` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--limits)) - `m3` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--m3)) - `m3_version` (String) @@ -200,13 +200,13 @@ Read-Only: Read-Only: -- `aggregations` (List of String) +- `aggregations` (Set of String) - `drop` (Boolean) - `filter` (String) - `name` (String) -- `namespaces` (List of String) +- `namespaces` (Set of String) - `namespaces_object` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--rules--mapping--namespaces_object)) -- `namespaces_string` (List of String) +- `namespaces_string` (Set of String) - `tags` (List of Object) (see [below for nested schema](#nestedobjatt--m3db_user_config--rules--mapping--tags)) diff --git a/docs/data-sources/mysql.md b/docs/data-sources/mysql.md index 7bd9111ed..cbd472025 100644 --- a/docs/data-sources/mysql.md +++ b/docs/data-sources/mysql.md @@ -82,15 +82,15 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `admin_password` (String) - 
`admin_username` (String) - `backup_hour` (Number) - `backup_minute` (Number) - `binlog_retention_period` (Number) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `migration` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--migration)) - `mysql` (List of Object) (see [below for nested schema](#nestedobjatt--mysql_user_config--mysql)) - `mysql_version` (String) diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md index f9ec47d95..32187036b 100644 --- a/docs/data-sources/opensearch.md +++ b/docs/data-sources/opensearch.md @@ -83,14 +83,14 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `custom_domain` (String) - `disable_replication_factor_adjustment` (Boolean) - `index_patterns` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--index_patterns)) - `index_template` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--index_template)) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `keep_index_refresh_interval` (Boolean) - `max_index_count` (Number) - `openid` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--openid)) @@ -176,7 +176,7 @@ Read-Only: - `indices_recovery_max_bytes_per_sec` (Number) - `indices_recovery_max_concurrent_file_chunks` (Number) - `override_main_response_version` (Boolean) -- `reindex_remote_whitelist` (List of String) +- `reindex_remote_whitelist` (Set of String) - 
`script_max_compilations_rate` (String) - `search_max_buckets` (Number) - `thread_pool_analyze_queue_size` (Number) diff --git a/docs/data-sources/pg.md b/docs/data-sources/pg.md index c0dfeadd1..78cb272d5 100644 --- a/docs/data-sources/pg.md +++ b/docs/data-sources/pg.md @@ -91,15 +91,15 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) +- `additional_backup_regions` (Set of String) - `admin_password` (String) - `admin_username` (String) - `backup_hour` (Number) - `backup_minute` (Number) - `enable_ipv6` (Boolean) -- `ip_filter` (List of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `migration` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--migration)) - `pg` (List of Object) (see [below for nested schema](#nestedobjatt--pg_user_config--pg)) - `pg_read_replica` (Boolean) @@ -210,7 +210,7 @@ Read-Only: - `autodb_max_db_connections` (Number) - `autodb_pool_mode` (String) - `autodb_pool_size` (Number) -- `ignore_startup_parameters` (List of String) +- `ignore_startup_parameters` (Set of String) - `min_pool_size` (Number) - `server_idle_timeout` (Number) - `server_lifetime` (Number) diff --git a/docs/data-sources/redis.md b/docs/data-sources/redis.md index 79b7b3bc5..6bcb1d2f9 100644 --- a/docs/data-sources/redis.md +++ b/docs/data-sources/redis.md @@ -82,10 +82,10 @@ Read-Only: Read-Only: -- `additional_backup_regions` (List of String) -- `ip_filter` (List of String) +- `additional_backup_regions` (Set of String) +- `ip_filter` (Set of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) +- `ip_filter_string` (Set of String) - `migration` (List of Object) (see [below for nested 
schema](#nestedobjatt--redis_user_config--migration)) - `private_access` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--private_access)) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--redis_user_config--privatelink_access)) diff --git a/docs/resources/cassandra.md b/docs/resources/cassandra.md index 8e2bff40d..e64a9fd4d 100644 --- a/docs/resources/cassandra.md +++ b/docs/resources/cassandra.md @@ -77,18 +77,18 @@ resource "aiven_cassandra" "bar" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. -- `cassandra` (Block List, Max: 1) cassandra configuration values. (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra)) +- `cassandra` (Block List, Max: 1) cassandra configuration values (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra)) - `cassandra_version` (String) Cassandra major version. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--cassandra_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--cassandra_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `migrate_sstableloader` (Boolean) Sets the service into migration mode enabling the sstableloader utility to be used to upload Cassandra data files. Available only on service create. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--cassandra_user_config--private_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--cassandra_user_config--private_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--cassandra_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--cassandra_user_config--public_access)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `service_to_join_with` (String) When bootstrapping, instead of creating a new Cassandra cluster try to join an existing one from another service. Can only be set on service creation. - `static_ips` (Boolean) Use static public IP addresses. 
diff --git a/docs/resources/clickhouse.md b/docs/resources/clickhouse.md index 83b50846a..be243af3a 100644 --- a/docs/resources/clickhouse.md +++ b/docs/resources/clickhouse.md @@ -69,14 +69,14 @@ resource "aiven_clickhouse" "clickhouse" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--clickhouse_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--clickhouse_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--clickhouse_user_config--privatelink_access)) +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--clickhouse_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--clickhouse_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--clickhouse_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--clickhouse_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--clickhouse_user_config--public_access)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/flink.md b/docs/resources/flink.md index 9dd5a629b..80e3ab87a 100644 --- a/docs/resources/flink.md +++ b/docs/resources/flink.md @@ -82,11 +82,11 @@ Optional: Optional: - `flink_version` (String) Flink major version. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16' (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `number_of_task_slots` (Number) Task slots per node. For a 3 node plan, total number of task slots is 3x this value. -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--flink_user_config--privatelink_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--flink_user_config--privatelink_access)) ### Nested Schema for `flink_user_config.ip_filter_object` diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md index 7c970bdbe..3c84ee86f 100644 --- a/docs/resources/grafana.md +++ b/docs/resources/grafana.md @@ -77,18 +77,18 @@ resource "aiven_grafana" "gr1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `alerting_enabled` (Boolean) Enable or disable Grafana alerting functionality. - `alerting_error_or_timeout` (String) Default error or timeout setting for new alerting rules. - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. - `alerting_nodata_or_nullvalues` (String) Default value for 'no data or null values' for new alerting rules. - `allow_embedding` (Boolean) Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking. -- `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration. 
(see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread)) +- `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread)) - `auth_basic_enabled` (Boolean) Enable or disable basic authentication form, used by Grafana built-in login. -- `auth_generic_oauth` (Block List, Max: 1) Generic OAuth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_generic_oauth)) -- `auth_github` (Block List, Max: 1) Github Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_github)) -- `auth_gitlab` (Block List, Max: 1) GitLab Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab)) -- `auth_google` (Block List, Max: 1) Google Auth integration. (see [below for nested schema](#nestedblock--grafana_user_config--auth_google)) +- `auth_generic_oauth` (Block List, Max: 1) Generic OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_generic_oauth)) +- `auth_github` (Block List, Max: 1) Github Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_github)) +- `auth_gitlab` (Block List, Max: 1) GitLab Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab)) +- `auth_google` (Block List, Max: 1) Google Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_google)) - `cookie_samesite` (String) Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. - `dashboard_previews_enabled` (Boolean) This feature is new in Grafana 9 and is quite resource intensive. 
It may cause low-end plans to work more slowly while the dashboard previews are rendering. @@ -96,23 +96,23 @@ Optional: - `dashboards_versions_to_keep` (Number) Dashboard versions to keep per dashboard. - `dataproxy_send_user_header` (Boolean) Send 'X-Grafana-User' header to data source. - `dataproxy_timeout` (Number) Timeout for data proxy requests in seconds. -- `date_formats` (Block List, Max: 1) Grafana date format specifications. (see [below for nested schema](#nestedblock--grafana_user_config--date_formats)) +- `date_formats` (Block List, Max: 1) Grafana date format specifications (see [below for nested schema](#nestedblock--grafana_user_config--date_formats)) - `disable_gravatar` (Boolean) Set to true to disable gravatar. Defaults to false (gravatar is enabled). - `editors_can_admin` (Boolean) Editors can manage folders, teams and dashboards created by them. -- `external_image_storage` (Block List, Max: 1) External image store settings. (see [below for nested schema](#nestedblock--grafana_user_config--external_image_storage)) +- `external_image_storage` (Block List, Max: 1) External image store settings (see [below for nested schema](#nestedblock--grafana_user_config--external_image_storage)) - `google_analytics_ua_id` (String) Google Analytics ID. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--grafana_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16' (see [below for nested schema](#nestedblock--grafana_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `metrics_enabled` (Boolean) Enable Grafana /metrics endpoint. - `oauth_allow_insecure_email_lookup` (Boolean) Enforce user lookup based on email instead of the unique ID provided by the IdP. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--grafana_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--grafana_user_config--privatelink_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--grafana_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--grafana_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--grafana_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--grafana_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. -- `smtp_server` (Block List, Max: 1) SMTP server settings. 
(see [below for nested schema](#nestedblock--grafana_user_config--smtp_server)) +- `smtp_server` (Block List, Max: 1) SMTP server settings (see [below for nested schema](#nestedblock--grafana_user_config--smtp_server)) - `static_ips` (Boolean) Use static public IP addresses. - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false. - `user_auto_assign_org_role` (String) Set role for new signups. Defaults to Viewer. @@ -131,8 +131,8 @@ Required: Optional: - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in. -- `allowed_domains` (List of String) Allowed domains. -- `allowed_groups` (List of String) Require users to belong to one of given groups. +- `allowed_domains` (Set of String) Allowed domains. +- `allowed_groups` (Set of String) Require users to belong to one of given groups. @@ -149,11 +149,11 @@ Required: Optional: - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in. -- `allowed_domains` (List of String) Allowed domains. -- `allowed_organizations` (List of String) Require user to be member of one of the listed organizations. +- `allowed_domains` (Set of String) Allowed domains. +- `allowed_organizations` (Set of String) Require user to be member of one of the listed organizations. - `auto_login` (Boolean) Allow users to bypass the login screen and automatically log in. - `name` (String) Name of the OAuth integration. -- `scopes` (List of String) OAuth scopes. +- `scopes` (Set of String) OAuth scopes. @@ -167,8 +167,8 @@ Required: Optional: - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in. -- `allowed_organizations` (List of String) Require users to belong to one of given organizations. -- `team_ids` (List of Number) Require users to belong to one of given team IDs. +- `allowed_organizations` (Set of String) Require users to belong to one of given organizations. 
+- `team_ids` (Set of Number) Require users to belong to one of given team IDs. @@ -176,13 +176,13 @@ Optional: Required: +- `allowed_groups` (Set of String) Require users to belong to one of given groups. - `client_id` (String) Client ID from provider. - `client_secret` (String) Client secret from provider. Optional: - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in. -- `allowed_groups` (List of String) Require users to belong to one of given groups. - `api_url` (String) API URL. This only needs to be set when using self hosted GitLab. - `auth_url` (String) Authorization URL. This only needs to be set when using self hosted GitLab. - `token_url` (String) Token URL. This only needs to be set when using self hosted GitLab. @@ -193,13 +193,13 @@ Optional: Required: +- `allowed_domains` (Set of String) Domains allowed to sign-in to this Grafana. - `client_id` (String) Client ID from provider. - `client_secret` (String) Client secret from provider. Optional: - `allow_sign_up` (Boolean) Automatically sign-up users on successful sign-in. -- `allowed_domains` (List of String) Domains allowed to sign-in to this Grafana. diff --git a/docs/resources/influxdb.md b/docs/resources/influxdb.md index acb8072cb..bb970489b 100644 --- a/docs/resources/influxdb.md +++ b/docs/resources/influxdb.md @@ -75,16 +75,16 @@ resource "aiven_influxdb" "inf1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `influxdb` (Block List, Max: 1) influxdb.conf configuration values. (see [below for nested schema](#nestedblock--influxdb_user_config--influxdb)) -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
-- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--influxdb_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--influxdb_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--influxdb_user_config--privatelink_access)) +- `influxdb` (Block List, Max: 1) influxdb.conf configuration values (see [below for nested schema](#nestedblock--influxdb_user_config--influxdb)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--influxdb_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--influxdb_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--influxdb_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. 
(see [below for nested schema](#nestedblock--influxdb_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--influxdb_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md index 94fc4ef95..638bd508f 100644 --- a/docs/resources/kafka.md +++ b/docs/resources/kafka.md @@ -88,24 +88,24 @@ resource "aiven_kafka" "kafka1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `kafka` (Block List, Max: 1) Kafka broker configuration values. (see [below for nested schema](#nestedblock--kafka_user_config--kafka)) -- `kafka_authentication_methods` (Block List, Max: 1) Kafka authentication methods. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_authentication_methods)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `kafka` (Block List, Max: 1) Kafka broker configuration values (see [below for nested schema](#nestedblock--kafka_user_config--kafka)) +- `kafka_authentication_methods` (Block List, Max: 1) Kafka authentication methods (see [below for nested schema](#nestedblock--kafka_user_config--kafka_authentication_methods)) - `kafka_connect` (Boolean) Enable Kafka Connect service. The default value is `false`. -- `kafka_connect_config` (Block List, Max: 1) Kafka Connect configuration values. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_connect_config)) +- `kafka_connect_config` (Block List, Max: 1) Kafka Connect configuration values (see [below for nested schema](#nestedblock--kafka_user_config--kafka_connect_config)) - `kafka_rest` (Boolean) Enable Kafka-REST service. The default value is `false`. - `kafka_rest_authorization` (Boolean) Enable authorization in Kafka-REST service. -- `kafka_rest_config` (Block List, Max: 1) Kafka REST configuration. (see [below for nested schema](#nestedblock--kafka_user_config--kafka_rest_config)) +- `kafka_rest_config` (Block List, Max: 1) Kafka REST configuration (see [below for nested schema](#nestedblock--kafka_user_config--kafka_rest_config)) - `kafka_version` (String) Kafka major version. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--kafka_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. 
(see [below for nested schema](#nestedblock--kafka_user_config--privatelink_access)) -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--kafka_user_config--public_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--kafka_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--kafka_user_config--privatelink_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--kafka_user_config--public_access)) - `schema_registry` (Boolean) Enable Schema-Registry service. The default value is `false`. -- `schema_registry_config` (Block List, Max: 1) Schema Registry configuration. (see [below for nested schema](#nestedblock--kafka_user_config--schema_registry_config)) +- `schema_registry_config` (Block List, Max: 1) Schema Registry configuration (see [below for nested schema](#nestedblock--kafka_user_config--schema_registry_config)) - `static_ips` (Boolean) Use static public IP addresses. @@ -132,7 +132,7 @@ Optional: - `group_initial_rebalance_delay_ms` (Number) The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. - `group_max_session_timeout_ms` (Number) The maximum allowed session timeout for registered consumers. 
Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. - `group_min_session_timeout_ms` (Number) The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. -- `log_cleaner_delete_retention_ms` (Number) How long are delete records retained?. +- `log_cleaner_delete_retention_ms` (Number) How long are delete records retained? - `log_cleaner_max_compaction_lag_ms` (Number) The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. - `log_cleaner_min_cleanable_ratio` (Number) Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. - `log_cleaner_min_compaction_lag_ms` (Number) The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. @@ -144,7 +144,7 @@ Optional: - `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. . - `log_message_timestamp_difference_max_ms` (Number) The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. - `log_message_timestamp_type` (String) Define whether the timestamp in the message is message create time or log append time. -- `log_preallocate` (Boolean) Should pre allocate file when create new segment?. +- `log_preallocate` (Boolean) Should pre allocate file when create new segment? - `log_retention_bytes` (Number) The maximum size of the log before deleting messages. - `log_retention_hours` (Number) The number of hours to keep a log file before deleting it. 
- `log_retention_ms` (Number) The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. diff --git a/docs/resources/kafka_connect.md b/docs/resources/kafka_connect.md index f1181fc5d..4cd5a484f 100644 --- a/docs/resources/kafka_connect.md +++ b/docs/resources/kafka_connect.md @@ -79,14 +79,14 @@ resource "aiven_kafka_connect" "kc1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_connect_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `kafka_connect` (Block List, Max: 1) Kafka Connect configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--kafka_connect_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--kafka_connect_user_config--privatelink_access)) -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--kafka_connect_user_config--public_access)) +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_connect_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `kafka_connect` (Block List, Max: 1) Kafka Connect configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--kafka_connect_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--kafka_connect_user_config--privatelink_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--kafka_connect_user_config--public_access)) - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/kafka_mirrormaker.md b/docs/resources/kafka_mirrormaker.md index 70be653a0..231dcf707 100644 --- a/docs/resources/kafka_mirrormaker.md +++ b/docs/resources/kafka_mirrormaker.md @@ -77,11 +77,11 @@ resource "aiven_kafka_mirrormaker" "mm1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
-- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/m3aggregator.md b/docs/resources/m3aggregator.md index 79c0cffc4..9739a2caa 100644 --- a/docs/resources/m3aggregator.md +++ b/docs/resources/m3aggregator.md @@ -74,10 +74,10 @@ resource "aiven_m3aggregator" "m3a" { Optional: - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--m3aggregator_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `m3_version` (String, Deprecated) M3 major version (deprecated, use m3aggregator_version). +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--m3aggregator_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `m3_version` (String) M3 major version (deprecated, use m3aggregator_version). - `m3aggregator_version` (String) M3 major version (the minimum compatible version). - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/m3db.md b/docs/resources/m3db.md index 7f2c1ff40..d79882b2f 100644 --- a/docs/resources/m3db.md +++ b/docs/resources/m3db.md @@ -78,21 +78,21 @@ resource "aiven_m3db" "m3" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--m3db_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `limits` (Block List, Max: 1) M3 limits. (see [below for nested schema](#nestedblock--m3db_user_config--limits)) -- `m3` (Block List, Max: 1) M3 specific configuration options. (see [below for nested schema](#nestedblock--m3db_user_config--m3)) -- `m3_version` (String, Deprecated) M3 major version (deprecated, use m3db_version). +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
+- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--m3db_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `limits` (Block List, Max: 1) M3 limits (see [below for nested schema](#nestedblock--m3db_user_config--limits)) +- `m3` (Block List, Max: 1) M3 specific configuration options (see [below for nested schema](#nestedblock--m3db_user_config--m3)) +- `m3_version` (String) M3 major version (deprecated, use m3db_version). - `m3coordinator_enable_graphite_carbon_ingest` (Boolean) Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only. - `m3db_version` (String) M3 major version (the minimum compatible version). -- `namespaces` (Block List, Max: 2147483647) List of M3 namespaces. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--m3db_user_config--private_access)) +- `namespaces` (Block List, Max: 2147483647) List of M3 namespaces (see [below for nested schema](#nestedblock--m3db_user_config--namespaces)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--m3db_user_config--private_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--m3db_user_config--public_access)) -- `rules` (Block List, Max: 1) M3 rules. 
(see [below for nested schema](#nestedblock--m3db_user_config--rules)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--m3db_user_config--public_access)) +- `rules` (Block List, Max: 1) M3 rules (see [below for nested schema](#nestedblock--m3db_user_config--rules)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. @@ -126,7 +126,7 @@ Optional: Optional: -- `tag_options` (Block List, Max: 1) M3 Tag Options. (see [below for nested schema](#nestedblock--m3db_user_config--m3--tag_options)) +- `tag_options` (Block List, Max: 1) M3 Tag Options (see [below for nested schema](#nestedblock--m3db_user_config--m3--tag_options)) ### Nested Schema for `m3db_user_config.m3.tag_options` @@ -148,15 +148,18 @@ Required: Optional: -- `options` (Block List, Max: 1) Namespace options. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options)) +- `options` (Block List, Max: 1) Namespace options (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options)) - `resolution` (String) The resolution for an aggregated namespace. ### Nested Schema for `m3db_user_config.namespaces.options` +Required: + +- `retention_options` (Block List, Min: 1, Max: 1) Retention options (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options--retention_options)) + Optional: -- `retention_options` (Block List, Max: 1) Retention options. (see [below for nested schema](#nestedblock--m3db_user_config--namespaces--options--retention_options)) - `snapshot_enabled` (Boolean) Controls whether M3DB will create snapshot files for this namespace. - `writes_to_commitlog` (Boolean) Controls whether M3DB will include writes to this namespace in the commitlog. 
@@ -195,7 +198,7 @@ Optional: Optional: -- `mapping` (Block List, Max: 10) List of M3 mapping rules. (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping)) +- `mapping` (Block List, Max: 10) List of M3 mapping rules (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping)) ### Nested Schema for `m3db_user_config.rules.mapping` @@ -206,20 +209,23 @@ Required: Optional: -- `aggregations` (List of String) List of aggregations to be applied. +- `aggregations` (Set of String) List of aggregations to be applied. - `drop` (Boolean) Only store the derived metric (as specified in the roll-up rules), if any. - `name` (String) The (optional) name of the rule. -- `namespaces` (List of String, Deprecated) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). -- `namespaces_object` (Block List, Max: 10) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution. (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--namespaces_object)) -- `namespaces_string` (List of String) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). -- `tags` (Block List, Max: 10) List of tags to be appended to matching metrics. 
(see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--tags)) +- `namespaces` (Set of String, Deprecated) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). +- `namespaces_object` (Block List, Max: 10) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--namespaces_object)) +- `namespaces_string` (Set of String) This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards). +- `tags` (Block List, Max: 10) List of tags to be appended to matching metrics (see [below for nested schema](#nestedblock--m3db_user_config--rules--mapping--tags)) ### Nested Schema for `m3db_user_config.rules.mapping.namespaces_object` -Optional: +Required: - `resolution` (String) The resolution for the matching namespace. + +Optional: + - `retention` (String) The retention period of the matching namespace. diff --git a/docs/resources/mysql.md b/docs/resources/mysql.md index 17f36a8a0..e3b160452 100644 --- a/docs/resources/mysql.md +++ b/docs/resources/mysql.md @@ -82,22 +82,22 @@ resource "aiven_mysql" "mysql1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. 
+- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `admin_password` (String, Sensitive) Custom password for admin user. Defaults to random string. This must be set only when a new service is being created. - `admin_username` (String) Custom username for admin user. This must be set only when a new service is being created. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. - `binlog_retention_period` (Number) The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--mysql_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--mysql_user_config--migration)) -- `mysql` (Block List, Max: 1) mysql.conf configuration values. (see [below for nested schema](#nestedblock--mysql_user_config--mysql)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16' (see [below for nested schema](#nestedblock--mysql_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--mysql_user_config--migration)) +- `mysql` (Block List, Max: 1) mysql.conf configuration values (see [below for nested schema](#nestedblock--mysql_user_config--mysql)) - `mysql_version` (String) MySQL major version. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--mysql_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--mysql_user_config--privatelink_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--mysql_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--mysql_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--mysql_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--mysql_user_config--public_access)) - `recovery_target_time` (String) Recovery target time when forking a service. This has effect only when a new service is being created. 
- `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md index e3a15be4f..cc150f374 100644 --- a/docs/resources/opensearch.md +++ b/docs/resources/opensearch.md @@ -83,26 +83,26 @@ resource "aiven_opensearch" "os1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. -- `disable_replication_factor_adjustment` (Boolean, Deprecated) Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. -- `index_patterns` (Block List, Max: 512) Index patterns. (see [below for nested schema](#nestedblock--opensearch_user_config--index_patterns)) -- `index_template` (Block List, Max: 1) Template settings for all new indexes. (see [below for nested schema](#nestedblock--opensearch_user_config--index_template)) -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--opensearch_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `disable_replication_factor_adjustment` (Boolean) Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. 
Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated. +- `index_patterns` (Block List, Max: 512) Index patterns (see [below for nested schema](#nestedblock--opensearch_user_config--index_patterns)) +- `index_template` (Block List, Max: 1) Template settings for all new indexes (see [below for nested schema](#nestedblock--opensearch_user_config--index_template)) +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--opensearch_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `keep_index_refresh_interval` (Boolean) Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true. -- `max_index_count` (Number, Deprecated) Use index_patterns instead. The default value is `0`. -- `openid` (Block List, Max: 1) OpenSearch OpenID Connect Configuration. (see [below for nested schema](#nestedblock--opensearch_user_config--openid)) -- `opensearch` (Block List, Max: 1) OpenSearch settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch)) -- `opensearch_dashboards` (Block List, Max: 1) OpenSearch Dashboards settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch_dashboards)) +- `max_index_count` (Number) use index_patterns instead. The default value is `0`. 
+- `openid` (Block List, Max: 1) OpenSearch OpenID Connect Configuration (see [below for nested schema](#nestedblock--opensearch_user_config--openid)) +- `opensearch` (Block List, Max: 1) OpenSearch settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch)) +- `opensearch_dashboards` (Block List, Max: 1) OpenSearch Dashboards settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch_dashboards)) - `opensearch_version` (String) OpenSearch major version. -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--opensearch_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--opensearch_user_config--privatelink_access)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--opensearch_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--opensearch_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--opensearch_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--opensearch_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. -- `saml` (Block List, Max: 1) OpenSearch SAML configuration. 
(see [below for nested schema](#nestedblock--opensearch_user_config--saml)) +- `saml` (Block List, Max: 1) OpenSearch SAML configuration (see [below for nested schema](#nestedblock--opensearch_user_config--saml)) - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `static_ips` (Boolean) Use static public IP addresses. @@ -172,7 +172,7 @@ Optional: - `action_destructive_requires_name` (Boolean) Require explicit index names when deleting. - `cluster_max_shards_per_node` (Number) Controls the number of shards allowed in the cluster per data node. - `cluster_routing_allocation_node_concurrent_recoveries` (Number) How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2. -- `email_sender_name` (String) This should be identical to the Sender name defined in Opensearch dashboards. +- `email_sender_name` (String) Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. - `email_sender_password` (String, Sensitive) Sender password for Opensearch alerts to authenticate with SMTP server. - `email_sender_username` (String) Sender username for Opensearch alerts. - `http_max_content_length` (Number) Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes. @@ -185,7 +185,7 @@ Optional: - `indices_recovery_max_bytes_per_sec` (Number) Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb. - `indices_recovery_max_concurrent_file_chunks` (Number) Number of file chunks sent in parallel for each recovery. Defaults to 2. - `override_main_response_version` (Boolean) Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false. -- `reindex_remote_whitelist` (List of String) Whitelisted addresses for reindexing. 
Changing this value will cause all OpenSearch instances to restart. +- `reindex_remote_whitelist` (Set of String) Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. - `script_max_compilations_rate` (String) Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context. - `search_max_buckets` (Number) Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined. - `thread_pool_analyze_queue_size` (Number) Size for the thread pool queue. See documentation for exact details. diff --git a/docs/resources/pg.md b/docs/resources/pg.md index b4802935a..40f8347aa 100644 --- a/docs/resources/pg.md +++ b/docs/resources/pg.md @@ -115,33 +115,33 @@ Read-Only: Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. - `admin_password` (String, Sensitive) Custom password for admin user. Defaults to random string. This must be set only when a new service is being created. - `admin_username` (String) Custom username for admin user. This must be set only when a new service is being created. - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. - `enable_ipv6` (Boolean) Register AAAA DNS records for the service, and allow IPv6 packets to service ports. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. 
(see [below for nested schema](#nestedblock--pg_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--pg_user_config--migration)) -- `pg` (Block List, Max: 1) postgresql.conf configuration values. (see [below for nested schema](#nestedblock--pg_user_config--pg)) -- `pg_read_replica` (Boolean, Deprecated) Use read_replica service integration instead. -- `pg_service_to_fork_from` (String, Deprecated) Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--pg_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--pg_user_config--migration)) +- `pg` (Block List, Max: 1) postgresql.conf configuration values (see [below for nested schema](#nestedblock--pg_user_config--pg)) +- `pg_read_replica` (Boolean) Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead). +- `pg_service_to_fork_from` (String) Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created. - `pg_stat_monitor_enable` (Boolean) Enable the pg_stat_monitor extension. 
Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`. - `pg_version` (String) PostgreSQL major version. -- `pgbouncer` (Block List, Max: 1) PGBouncer connection pooling settings. (see [below for nested schema](#nestedblock--pg_user_config--pgbouncer)) -- `pglookout` (Block List, Max: 1) PGLookout settings. (see [below for nested schema](#nestedblock--pg_user_config--pglookout)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--pg_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--pg_user_config--privatelink_access)) +- `pgbouncer` (Block List, Max: 1) PGBouncer connection pooling settings (see [below for nested schema](#nestedblock--pg_user_config--pgbouncer)) +- `pglookout` (Block List, Max: 1) PGLookout settings (see [below for nested schema](#nestedblock--pg_user_config--pglookout)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--pg_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--pg_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. 
(see [below for nested schema](#nestedblock--pg_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--pg_user_config--public_access)) - `recovery_target_time` (String) Recovery target time when forking a service. This has effect only when a new service is being created. - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `shared_buffers_percentage` (Number) Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. - `static_ips` (Boolean) Use static public IP addresses. - `synchronous_replication` (String) Synchronous replication type. Note that the service plan also needs to support synchronous replication. -- `timescaledb` (Block List, Max: 1) TimescaleDB extension configuration values. (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) +- `timescaledb` (Block List, Max: 1) TimescaleDB extension configuration values (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) - `variant` (String) Variant of the PostgreSQL service, may affect the features that are exposed by default. - `work_mem` (Number) Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). @@ -240,7 +240,7 @@ Optional: - `autodb_max_db_connections` (Number) Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. - `autodb_pool_mode` (String) PGBouncer pool mode. - `autodb_pool_size` (Number) If non-zero then create automatically a pool of that size per user when a pool doesn't exist. 
-- `ignore_startup_parameters` (List of String) List of parameters to ignore when given in startup packet. +- `ignore_startup_parameters` (Set of String) List of parameters to ignore when given in startup packet. - `min_pool_size` (Number) Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. - `server_idle_timeout` (Number) If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). - `server_lifetime` (Number) The pooler will close an unused server connection that has been connected longer than this. (seconds). diff --git a/docs/resources/redis.md b/docs/resources/redis.md index ae49a79e7..f92ad9bb4 100644 --- a/docs/resources/redis.md +++ b/docs/resources/redis.md @@ -77,15 +77,15 @@ resource "aiven_redis" "redis1" { Optional: -- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--redis_user_config--ip_filter_object)) -- `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. -- `migration` (Block List, Max: 1) Migrate data from existing server. (see [below for nested schema](#nestedblock--redis_user_config--migration)) -- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks. (see [below for nested schema](#nestedblock--redis_user_config--private_access)) -- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. 
(see [below for nested schema](#nestedblock--redis_user_config--privatelink_access)) +- `additional_backup_regions` (Set of String) Additional Cloud Regions for Backup Replication. +- `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--redis_user_config--ip_filter_object)) +- `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. +- `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--redis_user_config--migration)) +- `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--redis_user_config--private_access)) +- `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--redis_user_config--privatelink_access)) - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created. -- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet. (see [below for nested schema](#nestedblock--redis_user_config--public_access)) +- `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--redis_user_config--public_access)) - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service. - `redis_acl_channels_default` (String) Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. 
This option doesn't affect Redis configuration acl-pubsub-default. - `redis_io_threads` (Number) Set Redis IO thread count. Changing this will cause a restart of the Redis service. diff --git a/go.mod b/go.mod index ede4fd69d..9a5781cbd 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21.1 require ( github.com/aiven/aiven-go-client/v2 v2.1.0 + github.com/avast/retry-go v3.0.0+incompatible github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 @@ -15,9 +16,12 @@ require ( github.com/hashicorp/terraform-plugin-mux v0.12.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/liip/sheriff v0.11.1 + github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.8.4 golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 golang.org/x/sync v0.3.0 + golang.org/x/tools v0.6.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index e2755f8ee..0225eebf5 100644 --- a/go.sum +++ b/go.sum @@ -212,6 +212,8 @@ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJE github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= @@ -404,6 +406,7 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD 
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -472,6 +475,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liip/sheriff v0.11.1 h1:52YGzskXFPSEnwfEtXnbPiMKKXJGm5IP45s8Ogw0Wyk= +github.com/liip/sheriff v0.11.1/go.mod h1:nVTQYHxfdIfOHnk5FREt4j6cnaSlJPUfXFVORfgGmTo= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -522,6 +527,8 @@ github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJC github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= 
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/internal/schemautil/converters.go b/internal/schemautil/converters.go new file mode 100644 index 000000000..89261ad85 --- /dev/null +++ b/internal/schemautil/converters.go @@ -0,0 +1,426 @@ +package schemautil + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" +) + +const userConfigSuffix = "_user_config" + +func ExpandService(kind string, d *schema.ResourceData) (map[string]any, error) { + o, err := expand(kind, service.GetUserConfig(kind), d) + expandDeprecatedKeys(o) + log.Printf("sent %s", toJson(o)) + return o, err +} + +func FlattenService(kind string, d *schema.ResourceData, dto map[string]any) ([]map[string]any, error) { + return flatten(kind, service.GetUserConfig(kind), d, dto) +} + +// expand converts schema.ResourceData into DTO map +func expand(kind string, s *schema.Schema, d *schema.ResourceData) (map[string]any, error) { + key := kind + userConfigSuffix + state := &stateCompose{ + key: key, + path: key + ".0", + schema: s, + resource: d, + } + + // When "configs" is empty, then we need to delete all arrays in it + configs := d.GetRawConfig().GetAttr(key).AsValueSlice() + if len(configs) > 0 { + state.config = configs[0] + } + return expandObj(state) +} + +type stateCompose struct { + key string + path string + schema *schema.Schema + config cty.Value + resource *schema.ResourceData +} + +func (s *stateCompose) listStates() (result []*stateCompose) { + if s.config.IsNull() { + // panics if 
null called AsValueSlice + return result + } + for i, v := range s.config.AsValueSlice() { + c := &stateCompose{ + key: s.key, + path: fmt.Sprintf("%s.%d", s.path, i), + schema: s.schema, + config: v, + resource: s.resource, + } + result = append(result, c) + } + return result +} + +func (s *stateCompose) properties() map[string]*stateCompose { + props := make(map[string]*stateCompose) + res := s.schema.Elem.(*schema.Resource) + for key, subSchema := range res.Schema { + if subSchema.ForceNew && !s.resource.IsNewResource() { + continue + } + + var config cty.Value + if !s.config.IsNull() { + // Can't get value from nil + config = s.config.GetAttr(key) + } + + p := &stateCompose{ + key: key, + path: fmt.Sprintf("%s.%s", s.path, key), + resource: s.resource, + config: config, + schema: subSchema, + } + + props[key] = p + } + return props +} +func (s *stateCompose) get() any { + return s.resource.Get(s.path) +} + +func (s *stateCompose) inRemoteState() bool { + _, ok := s.resource.GetOk(s.path) + log.Printf("in remote state %q hasChange=%v, hasValue=%v", s.key, s.resource.HasChange(s.path), ok) + return !s.resource.HasChange(s.path) && ok +} + +func expandObj(state *stateCompose) (map[string]any, error) { + m := make(map[string]any) + for k, v := range state.properties() { + value, err := expandAttr(v) + if err != nil { + return nil, fmt.Errorf("%q field conversion error: %w", k, err) + } + if value != nil { + m[k] = value + } + } + return m, nil +} + +// expandAttr returns go value +func expandAttr(state *stateCompose) (any, error) { + data := state.get() + dataIsNull := state.config.IsNull() + + switch state.schema.Type { + case schema.TypeString, schema.TypeBool, schema.TypeInt, schema.TypeFloat: + if dataIsNull { + // Null scalar, no value in the config + return nil, nil + } + return data, nil + } + + if state.schema.Type == schema.TypeSet { + if dataIsNull && !state.inRemoteState() { + // A value that's haven't been set yet + log.Printf("%q doesnt have remote 
state", state.key) + return nil, nil + } + + // Makes possible to send ip_filter=[], for instance, to remove default value "0.0.0.0/0" + return valuesWithState(state.config, data.(*schema.Set).List()), nil + } + + // schema.TypeList + states := state.listStates() + items := make([]any, 0, len(states)) + for i := range states { + exp, err := expandObj(states[i]) + if err != nil { + return nil, err + } + // If an object is not empty + if exp != nil && len(exp) > 0 { + items = append(items, exp) + } + } + + if state.schema.MaxItems == 1 { + if len(items) == 0 { + return nil, nil + } + return items[0], nil + } + return items, nil +} + +func castType[T any](v any) (T, error) { + t, ok := v.(T) + if !ok { + var empty T + return empty, fmt.Errorf("invalid type. Expected %T, got %T", empty, v) + } + return t, nil +} + +func createOnlyFields() []string { + return []string{ + "admin_username", + "admin_password", + } +} + +func expandDeprecatedKeys(o map[string]any) { + m := map[string][]string{ + "ip_filter": { + "ip_filter_object", + "ip_filter_string", + }, + "namespaces": { + "namespaces_object", + "namespaces_string", + }, + } + + for orig, fakes := range m { + for _, fake := range fakes { + v, ok := o[fake] + if ok && reflect.ValueOf(&v).Elem().IsZero() { + o[orig] = v + } + delete(o, fake) + } + } +} + +// flatten converts DTO into a terraform compatible object +func flatten(kind string, s *schema.Schema, d *schema.ResourceData, dto map[string]any) ([]map[string]any, error) { + path := fmt.Sprintf("%s.0.", kind+userConfigSuffix) + + flattenDeprecatedKey(d, path, "ip_filter", dto) + flattenDeprecatedKey(d, path, "namespaces", dto) + sortObjectValues(d, path, dto, "ip_filter_object", "network") + sortObjectValues(d, path, dto, "namespaces", "name") + + for _, k := range createOnlyFields() { + v, ok := d.GetOk(path + k) + if ok { + dto[k] = v + } + } + + log.Printf("dto raw %s", toJson(dto)) + + r := s.Elem.(*schema.Resource) + tfo, err := flattenObj(r.Schema, dto) + if 
tfo == nil || err != nil { + return nil, err + } + return []map[string]any{tfo}, nil +} + +func flattenObj(s map[string]*schema.Schema, dto map[string]any) (map[string]any, error) { + tfo := make(map[string]any) + for k, v := range s { + vv, ok := dto[k] + if !ok { + continue + } + + if vv == nil { + continue + } + + value, err := flattenAttr(v, vv) + if err != nil { + return nil, fmt.Errorf("%q field conversion error: %w", k, err) + } + + if value != nil { + tfo[k] = value + } + } + if len(tfo) == 0 { + return nil, nil + } + return tfo, nil +} + +func flattenAttr(s *schema.Schema, data any) (any, error) { + switch s.Type { + case schema.TypeString: + return castType[string](data) + case schema.TypeBool: + return castType[bool](data) + case schema.TypeInt: + i, err := data.(json.Number).Int64() + return int(i), err + case schema.TypeFloat: + return data.(json.Number).Float64() + } + + scalarSchema, isScalar := s.Elem.(*schema.Schema) + if isScalar { + values := make([]any, 0) + for _, v := range data.([]any) { + val, err := flattenAttr(scalarSchema, v) + if err != nil { + return nil, err + } + values = append(values, val) + } + return schema.NewSet(schema.HashSchema(scalarSchema), values), nil + } + + // Single object or list of objects + r := s.Elem.(*schema.Resource) + if s.Type == schema.TypeList { + var list []any + if o, isObject := data.(map[string]any); isObject { + // Single object + if len(o) != 0 { + list = append(list, o) + } + } else { + // List of objects + list = data.([]any) + } + + return flattenList(r.Schema, list) + } + + // Array of scalars + items, err := flattenList(r.Schema, data.([]any)) + if items == nil || err != nil { + return nil, err + } + + return schema.NewSet(schema.HashResource(r), items), nil +} + +func flattenList(s map[string]*schema.Schema, list []any) ([]any, error) { + if len(list) == 0 { + return nil, nil + } + + items := make([]any, 0, len(list)) + for _, item := range list { + v, err := flattenObj(s, item.(map[string]any)) + 
if err != nil { + return nil, err + } + if v != nil { + items = append(items, v) + } + } + return items, nil +} + +func flattenDeprecatedKey(d *schema.ResourceData, path, key string, m map[string]any) { + items, ok := m[key].([]any) + if !ok || len(items) == 0 { + return + } + + _, ok = items[0].(map[string]any) + if ok { + // It's an object! + m[key+"_object"] = items + delete(m, key) + return + } + + str := key + "_string" + if _, ok := d.GetOk(str); ok { + m[str] = items + delete(m, str) + return + } + return + +} + +func sortObjectValues(d *schema.ResourceData, path string, dto map[string]any, key, sortBy string) { + filters, ok := d.GetOk(path + key) + if !ok { + return + } + + data, ok := dto[key] + if !ok { + return + } + + sortMap := make(map[string]int) + for i, v := range filters.([]any) { + m := v.(map[string]any) + sortMap[m[sortBy].(string)] = i + } + + if len(sortMap) == 0 { + return + } + + sorted := make([]map[string]any, 0) + for _, v := range data.([]any) { + sorted = append(sorted, v.(map[string]any)) + } + + sort.Slice(sorted, func(i, j int) bool { + ii := sorted[i][sortBy].(string) + jj := sorted[j][sortBy].(string) + return sortMap[ii] > sortMap[jj] + }) + + // Need to cast to "any", + // otherwise it might blow up in flattenObj function + // with type mismatch (map[string]any vs any) + result := make([]any, 0, len(sorted)) + for _, v := range sorted { + result = append(result, v) + } + dto[key] = result +} + +func toJson(a any) string { + b, _ := json.MarshalIndent(a, "", "\t") + return string(b) +} + +// valuesWithState removes values not presented in state +func valuesWithState(set cty.Value, values []any) []any { + result := make([]any, 0) + if set.IsNull() { + return result + } + + m := make(map[string]bool, set.LengthInt()) + for _, s := range set.AsValueSlice() { + if s.Type() == cty.String { + m[s.AsString()] = true + } else { + m[s.AsBigFloat().String()] = true + } + } + + for _, v := range values { + if m[fmt.Sprintf("%v", v)] { + 
result = append(result, v) + } + } + return result +} diff --git a/internal/schemautil/service.go b/internal/schemautil/service.go index 785785011..2d772af90 100644 --- a/internal/schemautil/service.go +++ b/internal/schemautil/service.go @@ -12,9 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" ) // defaultTimeout is the default timeout for service operations. This is not a const because it can be changed during @@ -401,7 +398,7 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, m interf return diag.Errorf("error getting project VPC ID: %s", err) } - cuc, err := apiconvert.ToAPI(userconfig.ServiceTypes, serviceType, d) + cuc, err := ExpandService(serviceType, d) if err != nil { return diag.Errorf( "error converting user config options for service type %s to API format: %s", serviceType, err, @@ -488,12 +485,11 @@ func ResourceServiceUpdate(ctx context.Context, d *schema.ResourceData, m interf return diag.Errorf("error getting project VPC ID: %s", err) } - st := d.Get("service_type").(string) - - cuc, err := apiconvert.ToAPI(userconfig.ServiceTypes, st, d) + serviceType := d.Get("service_type").(string) + cuc, err := ExpandService(serviceType, d) if err != nil { return diag.Errorf( - "error converting user config options for service type %s to API format: %s", st, err, + "error converting user config options for service type %s to API format: %s", serviceType, err, ) } @@ -657,37 +653,11 @@ func copyServicePropertiesFromAPIResponseToTerraform( } } - oldUserConfig, err := unmarshalUserConfig(d.Get(serviceType + "_user_config")) + newUserConfig, err := FlattenService(serviceType, d, s.UserConfig) if err != nil { return err } - 
// hasNestedProperties reports whether valueReceived is a slice holding at
// least one object, and if so returns the nested property schema found under
// valueAttributes["items"]["properties"].
func hasNestedProperties(
	valueReceived any,
	valueAttributes map[string]any,
) (map[string]any, bool) {
	slice, isSlice := valueReceived.([]any)
	if !isSlice {
		return nil, false
	}

	// The slice only counts as "nested" when at least one element is a map.
	containsObject := false
	for _, element := range slice {
		if asMap, isMap := element.(map[string]any); isMap && asMap != nil {
			containsObject = true
			break
		}
	}
	if !containsObject {
		return nil, false
	}

	// Pull the per-item property schema; an absent or empty schema means the
	// caller cannot treat the slice as nested objects.
	var nested map[string]any
	if items, isMap := valueAttributes["items"].(map[string]any); isMap {
		if props, isMap := items["properties"].(map[string]any); isMap {
			nested = props
		}
	}
	return nested, len(nested) > 0
}
// unsetAPIValue returns the zero ("unset") value for the given Aiven API type
// name; unknown types (such as "object") yield nil.
func unsetAPIValue(valueType string) any {
	switch valueType {
	case "boolean":
		return false
	case "integer":
		return 0
	case "number":
		return float64(0)
	case "string":
		return ""
	case "array":
		return []any{}
	default:
		return nil
	}
}
unsetAPIValue(typeReceived) } - var vrs interface{} + var valueReceivedParsed any - switch t { + switch typeReceived { default: - switch vra := vr.(type) { + switch valueReceivedAsArray := valueReceived.(type) { default: - vrs = vr - case []interface{}: - var l []interface{} - - if vanp, ok := sliceHasNestedProperties(vr, va); ok { - for kn, vn := range vra { - vna, ok := vn.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("%s...%s.%d: slice item is not a map", n, k, kn) + valueReceivedParsed = valueReceived + case []any: + var list []any + + if valueNestedProperties, isArray := hasNestedProperties(valueReceived, valueAttributes); isArray { + for nestedKey, nestedValue := range valueReceivedAsArray { + nestedValueAlpha, valueIsMap := nestedValue.(map[string]any) + if !valueIsMap { + return nil, fmt.Errorf( + "%s...%s.%d: slice item is not a map", nestedName, key, nestedKey, + ) } - p, err := propsFromAPI(n, vna, vanp) + propertiesParsed, err := parsePropertiesFromAPI( + nestedName, nestedValueAlpha, valueNestedProperties, + ) if err != nil { return nil, err } - l = append(l, p) + list = append(list, propertiesParsed) } } else { - l = append(l, vra...) + list = append(list, valueReceivedAsArray...) } - // We need to get nested types for the array items, so we can add suffix if needed. 
- var nts []string + var nestedTypes []string - if i, ok := va["items"].(map[string]interface{}); ok { - if oo, ok := i["one_of"].([]interface{}); ok { - for _, v := range oo { - if va, ok := v.(map[string]interface{}); ok { - if vat, ok := va["type"].(string); ok { - nts = append(nts, vat) + if itemKey, isArray := valueAttributes["items"].(map[string]any); isArray { + if oneOfNumericKey, isArrayNumeric := itemKey["one_of"].([]any); isArrayNumeric { + for _, nestedValue := range oneOfNumericKey { + if nestedValueAlpha, valueIsMap := nestedValue.(map[string]any); valueIsMap { + if nestedValueAlphaType, valueIsString := + nestedValueAlpha["type"].(string); valueIsString { + nestedTypes = append(nestedTypes, nestedValueAlphaType) } } } } else { - _, nts, err = userconfig.TerraformTypes(userconfig.SlicedString(i["type"])) + _, nestedTypes, err = userconfig.TerraformTypes(userconfig.SlicedString(itemKey["type"])) if err != nil { return nil, err } } } - if len(nts) > 1 { - if len(l) > 0 { - lf := l[0] + if len(nestedTypes) > 1 { + if len(list) > 0 { + listFirstSeries := list[0] - switch lf.(type) { + switch listFirstSeries.(type) { case bool: - k = fmt.Sprintf("%s_boolean", k) + key = fmt.Sprintf("%s_boolean", key) case int: - k = fmt.Sprintf("%s_integer", k) + key = fmt.Sprintf("%s_integer", key) case float64: - k = fmt.Sprintf("%s_number", k) + key = fmt.Sprintf("%s_number", key) case string: - k = fmt.Sprintf("%s_string", k) - case []interface{}: - k = fmt.Sprintf("%s_array", k) - case map[string]interface{}: - k = fmt.Sprintf("%s_object", k) + key = fmt.Sprintf("%s_string", key) + case []any: + key = fmt.Sprintf("%s_array", key) + case map[string]any: + key = fmt.Sprintf("%s_object", key) default: - return nil, fmt.Errorf("%s...%s: no suffix for given type", n, k) + return nil, fmt.Errorf("%s...%s: no suffix for given type", nestedName, key) } - // TODO: Remove with the next major version. 
- if k == "ip_filter_string" { - k = "ip_filter" + if key == "ip_filter_string" { + key = "ip_filter" } - // TODO: Remove with the next major version. - if k == "namespaces_string" { - k = "namespaces" + if key == "namespaces_string" { + key = "namespaces" } } else { - for _, v := range nts { - // TODO: Inline with the next major version. - tk := fmt.Sprintf("%s_%s", k, v) + for _, nestedValue := range nestedTypes { + trimmedKey := fmt.Sprintf("%s_%s", key, nestedValue) - // TODO: Remove with the next major version. - if tk == "ip_filter_string" { - tk = "ip_filter" + if trimmedKey == "ip_filter_string" { + trimmedKey = "ip_filter" } - // TODO: Remove with the next major version. - if tk == "namespaces_string" { - tk = "namespaces" + if trimmedKey == "namespaces_string" { + trimmedKey = "namespaces" } - res[tk] = l + propertyMappingCopy[trimmedKey] = list } continue } } - vrs = l + valueReceivedParsed = list } case "object": - vra, ok := vr.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("%s...%s: representation value is not a map", n, k) + valueReceivedAsAlpha, valueIsMap := valueReceived.(map[string]any) + if !valueIsMap { + return nil, fmt.Errorf("%s...%s: representation value is not a map", nestedName, key) } - nv, ok := va["properties"] - if !ok { - return nil, fmt.Errorf("%s...%s: properties key not found", n, k) + nestedValues, keyExists := valueAttributes["properties"] + if !keyExists { + return nil, fmt.Errorf("%s...%s: properties key not found", nestedName, key) } - nva, ok := nv.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("%s...%s: properties value is not a map", n, k) + nestedValuesAsAlpha, valueIsMap := nestedValues.(map[string]any) + if !valueIsMap { + return nil, fmt.Errorf("%s...%s: properties value is not a map", nestedName, key) } - p, err := propsFromAPI(n, vra, nva) + propertiesParsed, err := parsePropertiesFromAPI(nestedName, valueReceivedAsAlpha, nestedValuesAsAlpha) if err != nil { return nil, err } - vrs = 
[]map[string]interface{}{p} + valueReceivedParsed = []map[string]any{propertiesParsed} } - res[userconfig.EncodeKey(k)] = vrs + propertyMappingCopy[userconfig.EncodeKey(key)] = valueReceivedParsed } - return res, nil + return propertyMappingCopy, nil } -// FromAPI is a function that converts filled API response to Terraform user configuration schema. -func FromAPI(st userconfig.SchemaType, n string, r map[string]interface{}) ([]map[string]interface{}, error) { - var res []map[string]interface{} +// FromAPI is a function that returns a slice of properties parsed from an API response. +func FromAPI( + schemaType userconfig.SchemaType, + nestedName string, + responseMapping map[string]any, +) ([]map[string]any, error) { + var propertiesParsed []map[string]any - if len(r) == 0 { - return res, nil + if len(responseMapping) == 0 { + return propertiesParsed, nil } - p, _, err := propsReqs(st, n) + propertyRequests, _, err := propsReqs(schemaType, nestedName) if err != nil { return nil, err } - pa, err := propsFromAPI(n, r, p) + propertyAttributes, err := parsePropertiesFromAPI(nestedName, responseMapping, propertyRequests) if err != nil { return nil, err } - res = append(res, pa) + propertiesParsed = append(propertiesParsed, propertyAttributes) - return res, nil + return propertiesParsed, nil } diff --git a/internal/schemautil/userconfig/apiconvert/fromapi_test.go b/internal/schemautil/userconfig/apiconvert/fromapi_test.go index b7499386e..4e269edc3 100644 --- a/internal/schemautil/userconfig/apiconvert/fromapi_test.go +++ b/internal/schemautil/userconfig/apiconvert/fromapi_test.go @@ -11,34 +11,34 @@ import ( // TestFromAPI is a test for FromAPI. 
func TestFromAPI(t *testing.T) { type args struct { - st userconfig.SchemaType - n string - r map[string]interface{} + schemaType userconfig.SchemaType + serviceName string + request map[string]any } tests := []struct { name string args args - want []map[string]interface{} + want []map[string]any }{ { name: "boolean", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ "m3coordinator_enable_graphite_carbon_ingest": true, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), "m3coordinator_enable_graphite_carbon_ingest": true, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", "service_to_fork_from": "", "static_ips": false, @@ -47,20 +47,20 @@ func TestFromAPI(t *testing.T) { { name: "integer", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "limits": map[string]interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "limits": map[string]any{ "max_recently_queried_series_blocks": 20000, }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), - "limits": []map[string]interface{}{{ + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), + "limits": []map[string]any{{ "max_recently_queried_series_blocks": 20000, "max_recently_queried_series_disk_bytes_read": 0, "max_recently_queried_series_lookback": "", @@ -71,7 
+71,7 @@ func TestFromAPI(t *testing.T) { "m3coordinator_enable_graphite_carbon_ingest": false, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", "service_to_fork_from": "", "static_ips": false, @@ -80,20 +80,20 @@ func TestFromAPI(t *testing.T) { { name: "number and object", args: args{ - st: userconfig.ServiceTypes, - n: "kafka", - r: map[string]interface{}{ - "kafka": map[string]interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "kafka", + request: map[string]any{ + "kafka": map[string]any{ "log_cleaner_min_cleanable_ratio": 0.5, }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), - "kafka": []map[string]interface{}{{ + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), + "kafka": []map[string]any{{ "auto_create_topics_enable": false, "compression_type": "", "connections_max_idle_ms": 0, @@ -145,27 +145,27 @@ func TestFromAPI(t *testing.T) { { name: "array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "namespaces": []any{ + map[string]any{ "name": "default", "type": "unaggregated", }, }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), "m3coordinator_enable_graphite_carbon_ingest": false, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}{ - map[string]interface{}{ + "namespaces": 
[]any{ + map[string]any{ "name": "default", "resolution": "", "type": "unaggregated", @@ -179,26 +179,26 @@ func TestFromAPI(t *testing.T) { { name: "strings in one to many array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "ip_filter": []interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "ip_filter": []any{ "0.0.0.0/0", "10.20.0.0/16", }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}{ + "ip_filter": []any{ "0.0.0.0/0", "10.20.0.0/16", }, "m3coordinator_enable_graphite_carbon_ingest": false, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", "service_to_fork_from": "", "static_ips": false, @@ -207,30 +207,30 @@ func TestFromAPI(t *testing.T) { { name: "objects in one to many array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "ip_filter": []interface{}{ - map[string]interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "ip_filter": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter_object": []interface{}{ - map[string]interface{}{ + "ip_filter_object": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, @@ -238,7 +238,7 @@ func TestFromAPI(t *testing.T) { "m3coordinator_enable_graphite_carbon_ingest": false, 
"m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", "service_to_fork_from": "", "static_ips": false, @@ -247,13 +247,13 @@ func TestFromAPI(t *testing.T) { { name: "strings in one to many array via one_of", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "rules": map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ "aggregated_*", }, }, @@ -261,27 +261,27 @@ func TestFromAPI(t *testing.T) { }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), "m3coordinator_enable_graphite_carbon_ingest": false, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", - "rules": []map[string]interface{}{{ - "mapping": []interface{}{ - map[string]interface{}{ - "aggregations": []interface{}(nil), + "rules": []map[string]any{{ + "mapping": []any{ + map[string]any{ + "aggregations": []any(nil), "drop": false, "filter": "", "name": "", - "namespaces": []interface{}{ + "namespaces": []any{ "aggregated_*", }, - "tags": []interface{}(nil), + "tags": []any(nil), }, }, }}, @@ -292,14 +292,14 @@ func TestFromAPI(t *testing.T) { { name: "objects in one to many array via one_of", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", - r: map[string]interface{}{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + 
schemaType: userconfig.ServiceTypes, + serviceName: "m3db", + request: map[string]any{ + "rules": map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -309,30 +309,30 @@ func TestFromAPI(t *testing.T) { }, }, }, - want: []map[string]interface{}{{ - "additional_backup_regions": []interface{}(nil), + want: []map[string]any{{ + "additional_backup_regions": []any(nil), "custom_domain": "", - "ip_filter": []interface{}(nil), - "ip_filter_object": []interface{}(nil), + "ip_filter": []any(nil), + "ip_filter_object": []any(nil), "m3coordinator_enable_graphite_carbon_ingest": false, "m3db_version": "", "m3_version": "", - "namespaces": []interface{}(nil), + "namespaces": []any(nil), "project_to_fork_from": "", - "rules": []map[string]interface{}{{ - "mapping": []interface{}{ - map[string]interface{}{ - "aggregations": []interface{}(nil), + "rules": []map[string]any{{ + "mapping": []any{ + map[string]any{ + "aggregations": []any(nil), "drop": false, "filter": "", "name": "", - "namespaces_object": []interface{}{ - map[string]interface{}{ + "namespaces_object": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, }, - "tags": []interface{}(nil), + "tags": []any(nil), }, }, }}, @@ -344,7 +344,7 @@ func TestFromAPI(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _ := FromAPI(tt.args.st, tt.args.n, tt.args.r) + got, _ := FromAPI(tt.args.schemaType, tt.args.serviceName, tt.args.request) if !cmp.Equal(got, tt.want) { t.Errorf(cmp.Diff(tt.want, got)) diff --git a/internal/schemautil/userconfig/apiconvert/toapi.go b/internal/schemautil/userconfig/apiconvert/toapi.go index 94baccdb0..586d03cf0 100644 --- a/internal/schemautil/userconfig/apiconvert/toapi.go +++ b/internal/schemautil/userconfig/apiconvert/toapi.go @@ -13,13 +13,13 @@ import ( // resourceDatable is an interface that allows to get the resource data from the schema. 
// This is needed to be able to test the conversion functions. See schema.ResourceData for more. type resourceDatable interface { - GetOk(string) (interface{}, bool) + GetOk(string) (any, bool) HasChange(string) bool IsNewResource() bool } var ( - // dotAnyDotNumberRegExp is a regular expression that matches a string that matches: + // keyPathEndingInNumberRegExp is a regular expression that matches a string that matches: // 1. key.1.key2.0.key3.2.key.5 // 2. key123.0 // 3. key.1 @@ -31,9 +31,9 @@ var ( // 3. key.abc // 4. .1 // 5. key. - dotAnyDotNumberRegExp = regexp.MustCompile(`.+\.[0-9]$`) + keyPathEndingInNumberRegExp = regexp.MustCompile(`.+\.[0-9]$`) - // dotNumberEOLOrDotRegExp is a regular expression that matches a string that matches: + // dotSeparatedNumberRegExp is a regular expression that matches a string that matches: // 1. .5 (match: .5) // 2. .9. (match: .9.) // 3. 0.1 (match: .1) @@ -45,58 +45,58 @@ var ( // 2. 1. // 3. 1.. // 4. .5a - dotNumberEOLOrDotRegExp = regexp.MustCompile(`\.\d($|\.)`) + dotSeparatedNumberRegExp = regexp.MustCompile(`\.\d($|\.)`) ) // arrayItemToAPI is a function that converts array property of Terraform user configuration schema to API // compatible format. func arrayItemToAPI( - n string, - fk []string, - k string, - v []interface{}, - i map[string]interface{}, - d resourceDatable, -) (interface{}, bool, error) { - var res []interface{} - - if len(v) == 0 { + serviceName string, + fullKeyPath []string, + arrayKey string, + arrayValues []any, + itemMap map[string]any, + resourceData resourceDatable, +) (any, bool, error) { + var convertedValues []any + + if len(arrayValues) == 0 { return json.RawMessage("[]"), false, nil } - fks := strings.Join(fk, ".") + fullKeyString := strings.Join(fullKeyPath, ".") // TODO: Remove when this is fixed on backend. 
- if k == "additional_backup_regions" { - return res, true, nil + if arrayKey == "additional_backup_regions" { + return convertedValues, true, nil } - ii, ok := i["items"].(map[string]interface{}) + itemMapItems, ok := itemMap["items"].(map[string]any) if !ok { - return nil, false, fmt.Errorf("%s (item): items key not found", fks) + return nil, false, fmt.Errorf("%s (item): items key not found", fullKeyString) } - var iit string + var itemType string // If the key has a type suffix, we use it to determine the type of the value. - if userconfig.IsKeyTyped(k) { - iit = k[strings.LastIndexByte(k, '_')+1:] + if userconfig.IsKeyTyped(arrayKey) { + itemType = arrayKey[strings.LastIndexByte(arrayKey, '_')+1:] // Find the one_of item that matches the type. - if oo, ok := ii["one_of"]; ok { - ooa, ok := oo.([]interface{}) + if oneOfItems, ok := itemMapItems["one_of"]; ok { + oneOfItemsSlice, ok := oneOfItems.([]any) if !ok { - return nil, false, fmt.Errorf("%s (items.one_of): not a slice", fks) + return nil, false, fmt.Errorf("%s (items.one_of): not a slice", fullKeyString) } - for i, vn := range ooa { - vna, ok := vn.(map[string]interface{}) + for i, oneOfItem := range oneOfItemsSlice { + oneOfItemMap, ok := oneOfItem.(map[string]any) if !ok { - return nil, false, fmt.Errorf("%s (items.one_of.%d): not a map", fks, i) + return nil, false, fmt.Errorf("%s (items.one_of.%d): not a map", fullKeyString, i) } - if ot, ok := vna["type"]; ok && ot == iit { - ii = vna + if itemTypeValue, ok := oneOfItemMap["type"]; ok && itemTypeValue == itemType { + itemMapItems = oneOfItemMap break } @@ -104,226 +104,214 @@ func arrayItemToAPI( } } else { // TODO: Remove this statement and the branch below it with the next major version. 
- _, ok := ii["one_of"] + _, ok := itemMapItems["one_of"] - if k == "ip_filter" || (ok && k == "namespaces") { - iit = "string" + if arrayKey == "ip_filter" || (ok && arrayKey == "namespaces") { + itemType = "string" } else { - _, aiits, err := userconfig.TerraformTypes(userconfig.SlicedString(ii["type"])) + _, itemTypes, err := userconfig.TerraformTypes(userconfig.SlicedString(itemMapItems["type"])) if err != nil { return nil, false, err } - if len(aiits) > 1 { - return nil, false, fmt.Errorf("%s (type): multiple types", fks) + if len(itemTypes) > 1 { + return nil, false, fmt.Errorf("%s (type): multiple types", fullKeyString) } - iit = aiits[0] + itemType = itemTypes[0] } } - for i, vn := range v { + for i, arrayValue := range arrayValues { // We only accept slices there, so we need to nest the value into a slice if the value is of object type. - if iit == "object" { - vn = []interface{}{vn} + if itemType == "object" { + arrayValue = []any{arrayValue} } - vnc, o, err := itemToAPI( - n, - iit, - append(fk, fmt.Sprintf("%d", i)), - fmt.Sprintf("%s.%d", k, i), - vn, - ii, + convertedValue, omit, err := itemToAPI( + serviceName, + itemType, + append(fullKeyPath, fmt.Sprintf("%d", i)), + fmt.Sprintf("%s.%d", arrayKey, i), + arrayValue, + itemMapItems, false, - d, + resourceData, ) if err != nil { return nil, false, err } - if !o { - res = append(res, vnc) + if !omit { + convertedValues = append(convertedValues, convertedValue) } } - return res, false, nil + return convertedValues, false, nil } // objectItemToAPI is a function that converts object property of Terraform user configuration schema to API // compatible format. 
func objectItemToAPI( - n string, - fk []string, - v []interface{}, - i map[string]interface{}, - d resourceDatable, -) (interface{}, bool, error) { - var res interface{} + serviceName string, + fullKeyPath []string, + objectValues []any, + itemSchema map[string]any, + resourceData resourceDatable, +) (any, bool, error) { + var result any - fks := strings.Join(fk, ".") + fullKeyString := strings.Join(fullKeyPath, ".") - fv := v[0] + firstValue := objectValues[0] // Object with only "null" fields becomes nil // Which can't be cast into a map - if fv == nil { - return res, true, nil + if firstValue == nil { + return result, true, nil } - fva, ok := fv.(map[string]interface{}) + firstValueAsMap, ok := firstValue.(map[string]any) if !ok { - return nil, false, fmt.Errorf("%s: not a map", fks) + return nil, false, fmt.Errorf("%s: not a map", fullKeyString) } - ip, ok := i["properties"].(map[string]interface{}) + itemProperties, ok := itemSchema["properties"].(map[string]any) if !ok { - return nil, false, fmt.Errorf("%s (item): properties key not found", fks) + return nil, false, fmt.Errorf("%s (item): properties key not found", fullKeyString) } - reqs := map[string]struct{}{} + requiredFields := map[string]struct{}{} - if sreqs, ok := i["required"].([]interface{}); ok { - reqs = userconfig.SliceToKeyedMap(sreqs) + if schemaRequiredFields, ok := itemSchema["required"].([]any); ok { + requiredFields = userconfig.SliceToKeyedMap(schemaRequiredFields) } - if !dotAnyDotNumberRegExp.MatchString(fks) { - fk = append(fk, "0") + if !keyPathEndingInNumberRegExp.MatchString(fullKeyString) { + fullKeyPath = append(fullKeyPath, "0") } - res, err := propsToAPI(n, fk, fva, ip, reqs, d) + result, err := propsToAPI( + serviceName, + fullKeyPath, + firstValueAsMap, + itemProperties, + requiredFields, + resourceData, + ) if err != nil { return nil, false, err } - return res, false, nil + return result, false, nil } // itemToAPI is a function that converts property of Terraform user 
configuration schema to API compatible format. func itemToAPI( - n string, - t string, - fk []string, - k string, - v interface{}, - i map[string]interface{}, - ireq bool, - d resourceDatable, -) (interface{}, bool, error) { - res := v - - fks := strings.Join(fk, ".") - - // We omit the value if it has no changes in the Terraform user configuration. - o := !d.HasChange(fks) - - // We need to make sure that if there were any changes to the parent object, we also send the value, even if it - // was not changed. - // - // We check that there are more than three elements in the fk slice, because we don't want to send the value if - // the parent object is the root object. - if o && len(fk) > 3 { - // We find the last index of the dot with a number after it, because we want to check if the parent object - // was changed. - match := dotNumberEOLOrDotRegExp.FindAllStringIndex(fks, -1) - if match != nil { - // We check if fks exists, i.e. it was set by the user, because if it was not set, we don't want to send - // the value. - _, e := d.GetOk(fks) - - // We get the length of the match slice to use it in the formula: lmatch - (lmatch - 1), which gives us - // the index of the last match, which is the parent object. We then get the index of the parent object - // in the fk slice and use it to get the key of the parent object. - lmatch := len(match) - - // Since Terraform thinks that new array elements are added without "existing", we also send the value if - // it does not exist, but is not empty either. 
- if (e || !reflect.ValueOf(v).IsZero()) && d.HasChange(fks[:match[lmatch-(lmatch-1)][0]]) { - o = false + serviceName string, + itemType string, + fullKeyPath []string, + key string, + value any, + inputMap map[string]any, + isRequired bool, + resourceData resourceDatable, +) (any, bool, error) { + result := value + + fullKeyString := strings.Join(fullKeyPath, ".") + + omitValue := !resourceData.HasChange(fullKeyString) + + if omitValue && len(fullKeyPath) > 3 { + lastDotWithNumberIndex := dotSeparatedNumberRegExp.FindAllStringIndex(fullKeyString, -1) + if lastDotWithNumberIndex != nil { + _, exists := resourceData.GetOk(fullKeyString) + lengthOfMatches := len(lastDotWithNumberIndex) + + if (exists || !reflect.ValueOf(value).IsZero()) && + resourceData.HasChange(fullKeyString[:lastDotWithNumberIndex[lengthOfMatches-(lengthOfMatches-1)][0]]) { + omitValue = false } } } - // We need to make sure that if the value is required, we send it, even if it has no changes in the Terraform. - if o && ireq { - o = false + if omitValue && isRequired { + omitValue = false } - // Assert the type of the value to match. 
- switch t { + switch itemType { case "boolean": - if _, ok := v.(bool); !ok { - return nil, false, fmt.Errorf("%s: not a boolean", fks) + if _, ok := value.(bool); !ok { + return nil, false, fmt.Errorf("%s: not a boolean", fullKeyString) } case "integer": - if _, ok := v.(int); !ok { - return nil, false, fmt.Errorf("%s: not an integer", fks) + if _, ok := value.(int); !ok { + return nil, false, fmt.Errorf("%s: not an integer", fullKeyString) } case "number": - if _, ok := v.(float64); !ok { - return nil, false, fmt.Errorf("%s: not a number", fks) + if _, ok := value.(float64); !ok { + return nil, false, fmt.Errorf("%s: not a number", fullKeyString) } case "string": - if _, ok := v.(string); !ok { - return nil, false, fmt.Errorf("%s: not a string", fks) + if _, ok := value.(string); !ok { + return nil, false, fmt.Errorf("%s: not a string", fullKeyString) } case "array", "object": - // Arrays and objects are handled separately. - - va, ok := v.([]interface{}) + valueArray, ok := value.([]any) if !ok { - return nil, false, fmt.Errorf("%s: not a slice", fks) + return nil, false, fmt.Errorf("%s: not a slice", fullKeyString) } - if va == nil || o { + if valueArray == nil || omitValue { return nil, true, nil } - if t == "array" { - return arrayItemToAPI(n, fk, k, va, i, d) + if itemType == "array" { + return arrayItemToAPI(serviceName, fullKeyPath, key, valueArray, inputMap, resourceData) } - if len(va) == 0 { + if len(valueArray) == 0 { return nil, true, nil } - return objectItemToAPI(n, fk, va, i, d) + return objectItemToAPI(serviceName, fullKeyPath, valueArray, inputMap, resourceData) default: - return nil, false, fmt.Errorf("%s: unsupported type %s", fks, t) + return nil, false, fmt.Errorf("%s: unsupported type %s", fullKeyString, itemType) } - return res, o, nil + return result, omitValue, nil } // processManyToOneKeys processes many to one keys by mapping them to their first non-empty value. 
-func processManyToOneKeys(res map[string]interface{}) { - // mto is a map that stores the keys and their associated many to one keys. - mto := make(map[string][]string) +func processManyToOneKeys(result map[string]any) { + // manyToOneKeyMap maps primary keys to their associated many-to-one keys. + manyToOneKeyMap := make(map[string][]string) // Iterate over the result map. // TODO: Remove all ip_filter and namespaces special cases when these fields are removed. - for k, v := range res { + for key, value := range result { // If the value is a map, process it recursively. - if va, ok := v.(map[string]interface{}); ok { - processManyToOneKeys(va) + if valueAsMap, ok := value.(map[string]any); ok { + processManyToOneKeys(valueAsMap) } // Ignore keys that are not typed and are not special keys. - if !userconfig.IsKeyTyped(k) && k != "ip_filter" && k != "namespaces" { + if !userconfig.IsKeyTyped(key) && key != "ip_filter" && key != "namespaces" { continue } // Extract the real key, which is the key without suffix unless it's a special key. - rk := k - if k != "ip_filter" && k != "namespaces" { - rk = k[:strings.LastIndexByte(k, '_')] + realKey := key + if key != "ip_filter" && key != "namespaces" { + realKey = key[:strings.LastIndexByte(key, '_')] } - // Append the key to its corresponding list in the mto map. - mto[rk] = append(mto[rk], k) + // Append the key to its corresponding list in the manyToOneKeyMap map. + manyToOneKeyMap[realKey] = append(manyToOneKeyMap[realKey], key) } - // By this stage, the 'mto' map takes a form similar to the following: + // By this stage, the 'manyToOneKeyMap' map takes a form similar to the following: // map[string][]string{ // // For 'ip_filter', there are two associated keys in the user configuration. The first non-empty one is used, // // for instance, if the user shifts from 'ip_filter' to 'ip_filter_object', the latter is preferred. 
@@ -332,18 +320,18 @@ func processManyToOneKeys(res map[string]interface{}) { // "namespaces": []string{"namespaces"}, // } - // Iterate over the many to one keys. - for k, v := range mto { - var nv interface{} // The new value for the key. + // Iterate over the many-to-one keys. + for primaryKey, associatedKeys := range manyToOneKeyMap { + var newValue any // The new value for the key. wasDeleted := false // Track if any key was deleted in the loop. - // Attempt to process the values as []interface{}. - for _, vn := range v { - if rv, ok := res[vn].([]interface{}); ok && len(rv) > 0 { - nv = rv + // Attempt to process the values as []any. + for _, associatedKey := range associatedKeys { + if associatedValue, ok := result[associatedKey].([]any); ok && len(associatedValue) > 0 { + newValue = associatedValue - delete(res, vn) // Delete the processed key-value pair from the result. + delete(result, associatedKey) // Delete the processed key-value pair from the result. wasDeleted = true } @@ -351,127 +339,144 @@ func processManyToOneKeys(res map[string]interface{}) { // If no key was deleted, attempt to process the values as json.RawMessage. if !wasDeleted { - for _, vn := range v { - if rv, ok := res[vn].(json.RawMessage); ok { - nv = rv + for _, associatedKey := range associatedKeys { + if associatedValue, ok := result[associatedKey].(json.RawMessage); ok { + newValue = associatedValue + + delete(result, associatedKey) // Delete the processed key-value pair from the result. - delete(res, vn) // Delete the processed key-value pair from the result. + break } } } - // Finally, set the new value for the key. - res[k] = nv + result[primaryKey] = newValue // Set the new value for the primary key. } } // propsToAPI is a function that converts properties of Terraform user configuration schema to API compatible format. 
func propsToAPI( - n string, - fk []string, - tp map[string]interface{}, - p map[string]interface{}, - reqs map[string]struct{}, - d resourceDatable, -) (map[string]interface{}, error) { - res := make(map[string]interface{}, len(tp)) + name string, + fullKeyPath []string, + types map[string]any, + properties map[string]any, + requiredFields map[string]struct{}, + data resourceDatable, +) (map[string]any, error) { + result := make(map[string]any, len(types)) - fks := strings.Join(fk, ".") + fullKeyString := strings.Join(fullKeyPath, ".") - for k, v := range tp { - k = userconfig.DecodeKey(k) + for typeKey, typeValue := range types { + typeKey = userconfig.DecodeKey(typeKey) - rk := k + rawKey := typeKey - // If the key has a suffix, we need to strip it to be able to find the corresponding property in the schema. - if userconfig.IsKeyTyped(k) { - rk = k[:strings.LastIndexByte(k, '_')] + if userconfig.IsKeyTyped(typeKey) { + rawKey = typeKey[:strings.LastIndexByte(typeKey, '_')] } - i, ok := p[rk] + property, ok := properties[rawKey] if !ok { - return nil, fmt.Errorf("%s.%s: key not found", fks, k) + return nil, fmt.Errorf("%s.%s: key not found", fullKeyString, typeKey) } - if i == nil { + if property == nil { continue } - ia, ok := i.(map[string]interface{}) + propertyAttributes, ok := property.(map[string]any) if !ok { - return nil, fmt.Errorf("%s.%s: not a map", fks, k) + return nil, fmt.Errorf("%s.%s: not a map", fullKeyString, typeKey) } - // If the property is supposed to be present only during resource's creation, - // we need to skip it if the resource is being updated. 
- if co, ok := ia["create_only"]; ok && co.(bool) && !d.IsNewResource() { + if createOnly, ok := propertyAttributes["create_only"]; ok && createOnly.(bool) && !data.IsNewResource() { continue } - _, ats, err := userconfig.TerraformTypes(userconfig.SlicedString(ia["type"])) + _, attributeTypes, err := userconfig.TerraformTypes(userconfig.SlicedString(propertyAttributes["type"])) if err != nil { return nil, err } - if len(ats) > 1 { - return nil, fmt.Errorf("%s.%s.type: multiple types", fks, k) + if len(attributeTypes) > 1 { + return nil, fmt.Errorf("%s.%s.type: multiple types", fullKeyString, typeKey) } - _, ireq := reqs[k] + _, isRequired := requiredFields[typeKey] - t := ats[0] + attributeType := attributeTypes[0] - cv, o, err := itemToAPI(n, t, append(fk, k), k, v, ia, ireq, d) + convertedValue, omit, err := itemToAPI( + name, + attributeType, + append(fullKeyPath, typeKey), + typeKey, + typeValue, + propertyAttributes, + isRequired, + data, + ) if err != nil { return nil, err } - if !o { - res[k] = cv + if !omit { + result[typeKey] = convertedValue } } - processManyToOneKeys(res) + processManyToOneKeys(result) - return res, nil + return result, nil } // ToAPI is a function that converts filled Terraform user configuration schema to API compatible format. -func ToAPI(st userconfig.SchemaType, n string, d resourceDatable) (map[string]interface{}, error) { - var res map[string]interface{} - - // fk is a full key slice. We use it to get the full key path to the property in the Terraform user configuration. 
- fk := []string{fmt.Sprintf("%s_user_config", n)} - - tp, ok := d.GetOk(fk[0]) - if !ok || tp == nil { - return res, nil +func ToAPI( + schemaType userconfig.SchemaType, + serviceName string, + resourceData resourceDatable, +) (map[string]any, error) { + var result map[string]any + + fullKeyPath := []string{fmt.Sprintf("%s_user_config", serviceName)} + + terraformConfig, ok := resourceData.GetOk(fullKeyPath[0]) + if !ok || terraformConfig == nil { + return result, nil } - tpa, ok := tp.([]interface{}) + configSlice, ok := terraformConfig.([]any) if !ok { - return nil, fmt.Errorf("%s (%d): not a slice", n, st) + return nil, fmt.Errorf("%s (%d): not a slice", serviceName, schemaType) } - ftp := tpa[0] - if ftp == nil { - return res, nil + firstConfig := configSlice[0] + if firstConfig == nil { + return result, nil } - ftpa, ok := ftp.(map[string]interface{}) + configMap, ok := firstConfig.(map[string]any) if !ok { - return nil, fmt.Errorf("%s.0 (%d): not a map", n, st) + return nil, fmt.Errorf("%s.0 (%d): not a map", serviceName, schemaType) } - p, reqs, err := propsReqs(st, n) + properties, requiredProperties, err := propsReqs(schemaType, serviceName) if err != nil { return nil, err } - res, err = propsToAPI(n, append(fk, "0"), ftpa, p, reqs, d) + result, err = propsToAPI( + serviceName, + append(fullKeyPath, "0"), + configMap, + properties, + requiredProperties, + resourceData, + ) if err != nil { return nil, err } - return res, nil + return result, nil } diff --git a/internal/schemautil/userconfig/apiconvert/toapi_test.go b/internal/schemautil/userconfig/apiconvert/toapi_test.go index b52041779..056a91ae2 100644 --- a/internal/schemautil/userconfig/apiconvert/toapi_test.go +++ b/internal/schemautil/userconfig/apiconvert/toapi_test.go @@ -11,7 +11,7 @@ import ( // testResourceData is a resourceDatable compatible struct for testing. 
type testResourceData struct { - d map[string]interface{} + d map[string]any e map[string]struct{} c map[string]struct{} n bool @@ -19,7 +19,7 @@ type testResourceData struct { // newTestResourceData is a constructor for testResourceData. func newTestResourceData( - d map[string]interface{}, + d map[string]any, e map[string]struct{}, c map[string]struct{}, n bool, @@ -28,7 +28,7 @@ func newTestResourceData( } // GetOk is a test implementation of resourceDatable.GetOk. -func (t *testResourceData) GetOk(k string) (interface{}, bool) { +func (t *testResourceData) GetOk(k string) (any, bool) { v := t.d[k] _, e := t.e[k] @@ -51,25 +51,25 @@ func (t *testResourceData) IsNewResource() bool { // TestToAPI is a test for ToAPI. func TestToAPI(t *testing.T) { type args struct { - st userconfig.SchemaType - n string - d resourceDatable + schemaType userconfig.SchemaType + serviceName string + d resourceDatable } tests := []struct { name string args args - want map[string]interface{} + want map[string]any }{ { name: "boolean", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ "m3coordinator_enable_graphite_carbon_ingest": true, }, }, @@ -83,19 +83,19 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{ + want: map[string]any{ "m3coordinator_enable_graphite_carbon_ingest": true, }, }, { name: "boolean no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ "m3coordinator_enable_graphite_carbon_ingest": true, }, }, @@ -107,19 +107,19 @@ func TestToAPI(t *testing.T) 
{ false, ), }, - want: map[string]interface{}{}, + want: map[string]any{}, }, { name: "integer", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "limits": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "limits": []any{ + map[string]any{ "max_recently_queried_series_blocks": 20000, }, }, @@ -136,8 +136,8 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{ - "limits": map[string]interface{}{ + want: map[string]any{ + "limits": map[string]any{ "max_recently_queried_series_blocks": 20000, }, }, @@ -145,14 +145,14 @@ func TestToAPI(t *testing.T) { { name: "integer no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "limits": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "limits": []any{ + map[string]any{ "max_recently_queried_series_blocks": 20000, }, }, @@ -166,19 +166,19 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{}, + want: map[string]any{}, }, { name: "number and object", args: args{ - st: userconfig.ServiceTypes, - n: "kafka", + schemaType: userconfig.ServiceTypes, + serviceName: "kafka", d: newTestResourceData( - map[string]interface{}{ - "kafka_user_config": []interface{}{ - map[string]interface{}{ - "kafka": []interface{}{ - map[string]interface{}{ + map[string]any{ + "kafka_user_config": []any{ + map[string]any{ + "kafka": []any{ + map[string]any{ "log_cleaner_min_cleanable_ratio": 0.5, }, }, @@ -195,8 +195,8 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{ - "kafka": 
map[string]interface{}{ + want: map[string]any{ + "kafka": map[string]any{ "log_cleaner_min_cleanable_ratio": 0.5, }, }, @@ -204,14 +204,14 @@ func TestToAPI(t *testing.T) { { name: "number and object no changes", args: args{ - st: userconfig.ServiceTypes, - n: "kafka", + schemaType: userconfig.ServiceTypes, + serviceName: "kafka", d: newTestResourceData( - map[string]interface{}{ - "kafka_user_config": []interface{}{ - map[string]interface{}{ - "kafka": []interface{}{ - map[string]interface{}{ + map[string]any{ + "kafka_user_config": []any{ + map[string]any{ + "kafka": []any{ + map[string]any{ "log_cleaner_min_cleanable_ratio": 0.5, }, }, @@ -225,17 +225,17 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{}, + want: map[string]any{}, }, { name: "create_only string", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ "project_to_fork_from": "anotherprojectname", }, }, @@ -249,19 +249,19 @@ func TestToAPI(t *testing.T) { true, ), }, - want: map[string]interface{}{ + want: map[string]any{ "project_to_fork_from": "anotherprojectname", }, }, { name: "create_only string during update", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ "project_to_fork_from": "anotherprojectname", }, }, @@ -275,19 +275,19 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{}, + want: map[string]any{}, }, { name: "array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: 
newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "name": "default", "type": "unaggregated", }, @@ -308,7 +308,7 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "namespaces": []interface{}{ + "namespaces": []any{ map[string]any{ "name": "default", "type": "unaggregated", @@ -319,14 +319,14 @@ func TestToAPI(t *testing.T) { { name: "array no changes in one key", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "name": "default", "type": "unaggregated", }, @@ -347,7 +347,7 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "namespaces": []interface{}{ + "namespaces": []any{ map[string]any{ "name": "default", "type": "unaggregated", @@ -358,14 +358,14 @@ func TestToAPI(t *testing.T) { { name: "array no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "name": "default", "type": "unaggregated", }, @@ -385,13 +385,13 @@ func TestToAPI(t *testing.T) { { name: "strings in many to one array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - 
map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter": []interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter": []any{ "0.0.0.0/0", "10.20.0.0/16", }, @@ -410,7 +410,7 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "ip_filter": []interface{}{ + "ip_filter": []any{ "0.0.0.0/0", "10.20.0.0/16", }, @@ -419,13 +419,13 @@ func TestToAPI(t *testing.T) { { name: "strings in many to one array no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter": []interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter": []any{ "0.0.0.0/0", "10.20.0.0/16", }, @@ -444,13 +444,13 @@ func TestToAPI(t *testing.T) { { name: "strings in many to one array unset", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter": []interface{}{}, + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter": []any{}, }, }, }, @@ -473,18 +473,18 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter_object": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, @@ -508,12 
+508,12 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "ip_filter": []interface{}{ - map[string]interface{}{ + "ip_filter": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, @@ -523,22 +523,22 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array no changes in one element", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter_object": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, - map[string]interface{}{ + map[string]any{ "description": "foo", "network": "1.3.3.7/32", }, @@ -566,16 +566,16 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "ip_filter": []interface{}{ - map[string]interface{}{ + "ip_filter": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, - map[string]interface{}{ + map[string]any{ "description": "foo", "network": "1.3.3.7/32", }, @@ -585,18 +585,18 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter_object": []any{ + map[string]any{ 
"description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, @@ -616,23 +616,23 @@ func TestToAPI(t *testing.T) { { name: "migration from strings to objects in many to one array", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "ip_filter": []interface{}{}, - "ip_filter_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "ip_filter": []any{}, + "ip_filter_object": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, - map[string]interface{}{ + map[string]any{ "description": "foo", "network": "1.3.3.7/32", }, @@ -664,16 +664,16 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "ip_filter": []interface{}{ - map[string]interface{}{ + "ip_filter": []any{ + map[string]any{ "description": "test", "network": "0.0.0.0/0", }, - map[string]interface{}{ + map[string]any{ "description": "", "network": "10.20.0.0/16", }, - map[string]interface{}{ + map[string]any{ "description": "foo", "network": "1.3.3.7/32", }, @@ -683,17 +683,17 @@ func TestToAPI(t *testing.T) { { name: "strings in many to one array via one_of", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + 
"namespaces": []any{ "aggregated_*", }, }, @@ -718,10 +718,10 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ + "rules": map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ "aggregated_*", }, }, @@ -732,17 +732,17 @@ func TestToAPI(t *testing.T) { { name: "strings in many to one array via one_of no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ "aggregated_*", }, }, @@ -764,18 +764,18 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array via one_of", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces_object": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -804,11 +804,11 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + "rules": 
map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -821,18 +821,18 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array via one_of no changes in one key", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces_object": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -861,11 +861,11 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + "rules": map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -878,18 +878,18 @@ func TestToAPI(t *testing.T) { { name: "objects in many to one array via one_of no changes", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces_object": []any{ + map[string]any{ "resolution": "30s", 
"retention": "48h", }, @@ -913,19 +913,19 @@ func TestToAPI(t *testing.T) { { name: "migration from strings to objects in many to one array via one_of", args: args{ - st: userconfig.ServiceTypes, - n: "m3db", + schemaType: userconfig.ServiceTypes, + serviceName: "m3db", d: newTestResourceData( - map[string]interface{}{ - "m3db_user_config": []interface{}{ - map[string]interface{}{ - "rules": []interface{}{ - map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{}, - "namespaces_object": []interface{}{ - map[string]interface{}{ + map[string]any{ + "m3db_user_config": []any{ + map[string]any{ + "rules": []any{ + map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{}, + "namespaces_object": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -957,11 +957,11 @@ func TestToAPI(t *testing.T) { ), }, want: map[string]any{ - "rules": map[string]interface{}{ - "mapping": []interface{}{ - map[string]interface{}{ - "namespaces": []interface{}{ - map[string]interface{}{ + "rules": map[string]any{ + "mapping": []any{ + map[string]any{ + "namespaces": []any{ + map[string]any{ "resolution": "30s", "retention": "48h", }, @@ -974,12 +974,12 @@ func TestToAPI(t *testing.T) { { name: "required", args: args{ - st: userconfig.IntegrationEndpointTypes, - n: "rsyslog", + schemaType: userconfig.IntegrationEndpointTypes, + serviceName: "rsyslog", d: newTestResourceData( - map[string]interface{}{ - "rsyslog_user_config": []interface{}{ - map[string]interface{}{ + map[string]any{ + "rsyslog_user_config": []any{ + map[string]any{ "format": "rfc5424", "port": 514, "server": "rsyslog-server", @@ -1000,7 +1000,7 @@ func TestToAPI(t *testing.T) { false, ), }, - want: map[string]interface{}{ + want: map[string]any{ "format": "rfc5424", "port": 514, "server": "rsyslog-server", @@ -1011,22 +1011,22 @@ func TestToAPI(t *testing.T) { { name: "nested arrays no changes", args: args{ - st: 
userconfig.IntegrationTypes, - n: "clickhouse_kafka", + schemaType: userconfig.IntegrationTypes, + serviceName: "clickhouse_kafka", d: newTestResourceData( - map[string]interface{}{ - "clickhouse_kafka_user_config": []interface{}{ - map[string]interface{}{ - "tables": []interface{}{ - map[string]interface{}{ + map[string]any{ + "clickhouse_kafka_user_config": []any{ + map[string]any{ + "tables": []any{ + map[string]any{ "name": "foo", - "topics": []interface{}{ - map[string]interface{}{ + "topics": []any{ + map[string]any{ "name": "bar", }, }, - "columns": []interface{}{ - map[string]interface{}{ + "columns": []any{ + map[string]any{ "name": "baz", "type": "UInt16", }, @@ -1047,17 +1047,17 @@ func TestToAPI(t *testing.T) { true, ), }, - want: map[string]interface{}{ - "tables": []interface{}{ - map[string]interface{}{ + want: map[string]any{ + "tables": []any{ + map[string]any{ "name": "foo", - "topics": []interface{}{ - map[string]interface{}{ + "topics": []any{ + map[string]any{ "name": "bar", }, }, - "columns": []interface{}{ - map[string]interface{}{ + "columns": []any{ + map[string]any{ "name": "baz", "type": "UInt16", }, @@ -1069,22 +1069,22 @@ func TestToAPI(t *testing.T) { { name: "nested arrays change in top level element", args: args{ - st: userconfig.IntegrationTypes, - n: "clickhouse_kafka", + schemaType: userconfig.IntegrationTypes, + serviceName: "clickhouse_kafka", d: newTestResourceData( - map[string]interface{}{ - "clickhouse_kafka_user_config": []interface{}{ - map[string]interface{}{ - "tables": []interface{}{ - map[string]interface{}{ + map[string]any{ + "clickhouse_kafka_user_config": []any{ + map[string]any{ + "tables": []any{ + map[string]any{ "name": "foo", - "topics": []interface{}{ - map[string]interface{}{ + "topics": []any{ + map[string]any{ "name": "bar", }, }, - "columns": []interface{}{ - map[string]interface{}{ + "columns": []any{ + map[string]any{ "name": "baz", "type": "UInt16", }, @@ -1107,17 +1107,17 @@ func TestToAPI(t 
*testing.T) { false, ), }, - want: map[string]interface{}{ - "tables": []interface{}{ - map[string]interface{}{ + want: map[string]any{ + "tables": []any{ + map[string]any{ "name": "foo", - "topics": []interface{}{ - map[string]interface{}{ + "topics": []any{ + map[string]any{ "name": "bar", }, }, - "columns": []interface{}{ - map[string]interface{}{ + "columns": []any{ + map[string]any{ "name": "baz", "type": "UInt16", }, @@ -1130,7 +1130,7 @@ func TestToAPI(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _ := ToAPI(tt.args.st, tt.args.n, tt.args.d) + got, _ := ToAPI(tt.args.schemaType, tt.args.serviceName, tt.args.d) if !cmp.Equal(got, tt.want) { t.Errorf(cmp.Diff(tt.want, got)) diff --git a/internal/schemautil/userconfig/apiconvert/util.go b/internal/schemautil/userconfig/apiconvert/util.go index 84a3b472e..5a2ee0ec5 100644 --- a/internal/schemautil/userconfig/apiconvert/util.go +++ b/internal/schemautil/userconfig/apiconvert/util.go @@ -8,37 +8,36 @@ import ( // propsReqs is a function that returns a map of properties and required properties from a given schema type and node // name. 
-func propsReqs(st userconfig.SchemaType, n string) (map[string]interface{}, map[string]struct{}, error) { - rm, err := userconfig.CachedRepresentationMap(st) +func propsReqs(schemaType userconfig.SchemaType, nodeName string) (map[string]any, map[string]struct{}, error) { + representationMap, err := userconfig.CachedRepresentationMap(schemaType) if err != nil { return nil, nil, err } - s, ok := rm[n] - if !ok { - return nil, nil, fmt.Errorf("no schema found for %s (type %d)", n, st) + nodeSchema, exists := representationMap[nodeName] + if !exists { + return nil, nil, fmt.Errorf("no schema found for %s (type %d)", nodeName, schemaType) } - as, ok := s.(map[string]interface{}) + schemaAsMap, ok := nodeSchema.(map[string]any) if !ok { - return nil, nil, fmt.Errorf("schema %s (type %d) is not a map", n, st) + return nil, nil, fmt.Errorf("schema %s (type %d) is not a map", nodeName, schemaType) } - p, ok := as["properties"] - if !ok { - return nil, nil, fmt.Errorf("no properties found for %s (type %d)", n, st) + properties, exists := schemaAsMap["properties"] + if !exists { + return nil, nil, fmt.Errorf("no properties found for %s (type %d)", nodeName, schemaType) } - ap, ok := p.(map[string]interface{}) + propertiesAsMap, ok := properties.(map[string]any) if !ok { - return nil, nil, fmt.Errorf("properties of schema %s (type %d) are not a map", n, st) + return nil, nil, fmt.Errorf("properties of schema %s (type %d) are not a map", nodeName, schemaType) } - reqs := map[string]struct{}{} - - if sreqs, ok := as["required"].([]interface{}); ok { - reqs = userconfig.SliceToKeyedMap(sreqs) + requiredProperties := map[string]struct{}{} + if requiredPropertiesSlice, exists := schemaAsMap["required"].([]any); exists { + requiredProperties = userconfig.SliceToKeyedMap(requiredPropertiesSlice) } - return ap, reqs, nil + return propertiesAsMap, requiredProperties, nil } diff --git a/internal/schemautil/userconfig/apiconvert/util_test.go 
b/internal/schemautil/userconfig/apiconvert/util_test.go index dc2e91c43..230445d5a 100644 --- a/internal/schemautil/userconfig/apiconvert/util_test.go +++ b/internal/schemautil/userconfig/apiconvert/util_test.go @@ -11,68 +11,68 @@ import ( // TestPropsReqs is a test for propsReqs. func TestPropsReqs(t *testing.T) { type args struct { - st userconfig.SchemaType - n string + schemaType userconfig.SchemaType + serviceName string } tests := []struct { name string args args want struct { - wantP map[string]interface{} + wantP map[string]any wantR map[string]struct{} } }{ { name: "basic", args: args{ - st: userconfig.IntegrationEndpointTypes, - n: "rsyslog", + schemaType: userconfig.IntegrationEndpointTypes, + serviceName: "rsyslog", }, want: struct { - wantP map[string]interface{} + wantP map[string]any wantR map[string]struct{} }{ - map[string]interface{}{ - "ca": map[string]interface{}{ + map[string]any{ + "ca": map[string]any{ "example": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n", "max_length": 16384, "title": "PEM encoded CA certificate", - "type": []interface{}{ + "type": []any{ "string", "null", }, }, - "cert": map[string]interface{}{ + "cert": map[string]any{ "example": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n", "max_length": 16384, "title": "PEM encoded client certificate", - "type": []interface{}{ + "type": []any{ "string", "null", }, }, - "format": map[string]interface{}{ + "format": map[string]any{ "default": "rfc5424", - "enum": []interface{}{ - map[string]interface{}{"value": "rfc5424"}, - map[string]interface{}{"value": "rfc3164"}, - map[string]interface{}{"value": "custom"}, + "enum": []any{ + map[string]any{"value": "rfc5424"}, + map[string]any{"value": "rfc3164"}, + map[string]any{"value": "custom"}, }, "example": "rfc5424", "title": "message format", "type": "string", }, - "key": map[string]interface{}{ + "key": map[string]any{ "example": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", 
"max_length": 16384, "title": "PEM encoded client key", - "type": []interface{}{ + "type": []any{ "string", "null", }, }, - "logline": map[string]interface{}{ + "logline": map[string]any{ "example": "<%pri%>%timestamp:::date-rfc3339% %HOSTNAME% %app-name% %msg%", "max_length": 512, "min_length": 1, @@ -80,7 +80,7 @@ func TestPropsReqs(t *testing.T) { "title": "custom syslog message format", "type": "string", }, - "port": map[string]interface{}{ + "port": map[string]any{ "default": "514", "example": "514", "maximum": 65535, @@ -88,23 +88,23 @@ func TestPropsReqs(t *testing.T) { "title": "rsyslog server port", "type": "integer", }, - "sd": map[string]interface{}{ + "sd": map[string]any{ "example": "TOKEN tag=\"LiteralValue\"", "max_length": 1024, "title": "Structured data block for log message", - "type": []interface{}{ + "type": []any{ "string", "null", }, }, - "server": map[string]interface{}{ + "server": map[string]any{ "example": "logs.example.com", "max_length": 255, "min_length": 4, "title": "rsyslog server IP address or hostname", "type": "string", }, - "tls": map[string]interface{}{ + "tls": map[string]any{ "default": true, "example": true, "title": "Require TLS", @@ -123,7 +123,7 @@ func TestPropsReqs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotP, gotR, _ := propsReqs(tt.args.st, tt.args.n) + gotP, gotR, _ := propsReqs(tt.args.schemaType, tt.args.serviceName) if !cmp.Equal(gotP, tt.want.wantP) { t.Errorf(cmp.Diff(tt.want.wantP, gotP)) diff --git a/internal/schemautil/userconfig/convert.go b/internal/schemautil/userconfig/convert.go index fce6d647a..a3ab192c1 100644 --- a/internal/schemautil/userconfig/convert.go +++ b/internal/schemautil/userconfig/convert.go @@ -9,86 +9,94 @@ import ( ) // convertPropertyToSchema is a function that converts a property to a Terraform schema. 
-func convertPropertyToSchema(n string, p map[string]interface{}, t string, ad bool, ireq bool) jen.Dict { - r := jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, t), +func convertPropertyToSchema( + propertyName string, + propertyAttributes map[string]any, + terraformType string, + addDescription bool, + isRequired bool, +) jen.Dict { + resultDict := jen.Dict{ + jen.Id("Type"): jen.Qual(SchemaPackage, terraformType), } - if ad { - id, d := descriptionForProperty(p, t) + if addDescription { + isDeprecated, description := descriptionForProperty(propertyAttributes, terraformType) - r[jen.Id("Description")] = jen.Lit(d) + resultDict[jen.Id("Description")] = jen.Lit(description) - if id { - r[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") + if isDeprecated { + resultDict[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") } } - if ireq { - r[jen.Id("Required")] = jen.Lit(true) + if isRequired { + resultDict[jen.Id("Required")] = jen.Lit(true) } else { - r[jen.Id("Optional")] = jen.Lit(true) + resultDict[jen.Id("Optional")] = jen.Lit(true) - if d, ok := p["default"]; ok && isTerraformTypePrimitive(t) { - r[jen.Id("Default")] = jen.Lit(d) + if defaultValue, ok := propertyAttributes["default"]; ok && isTerraformTypePrimitive(terraformType) { + resultDict[jen.Id("Default")] = jen.Lit(defaultValue) } } - if co, ok := p["create_only"]; ok && co.(bool) { - r[jen.Id("ForceNew")] = jen.Lit(true) + if createOnly, ok := propertyAttributes["create_only"]; ok && createOnly.(bool) { + resultDict[jen.Id("ForceNew")] = jen.Lit(true) } - if strings.Contains(n, "api_key") || strings.Contains(n, "password") { - r[jen.Id("Sensitive")] = jen.Lit(true) + if strings.Contains(propertyName, "api_key") || strings.Contains(propertyName, "password") { + resultDict[jen.Id("Sensitive")] = jen.Lit(true) } // TODO: Generate validation rules for generated schema properties, also validate that value is within enum values. 
- return r + return resultDict } // convertPropertiesToSchemaMap is a function that converts a map of properties to a map of Terraform schemas. -func convertPropertiesToSchemaMap(p map[string]interface{}, req map[string]struct{}) (jen.Dict, error) { - r := make(jen.Dict, len(p)) +func convertPropertiesToSchemaMap(properties map[string]any, requiredProperties map[string]struct{}) (jen.Dict, error) { + resultDict := make(jen.Dict, len(properties)) - for k, v := range p { - va, ok := v.(map[string]interface{}) + for propertyName, propertyValue := range properties { + propertyAttributes, ok := propertyValue.(map[string]any) if !ok { continue } - ts, ats, err := TerraformTypes(SlicedString(va["type"])) + terraformTypes, aivenTypes, err := TerraformTypes(SlicedString(propertyAttributes["type"])) if err != nil { return nil, err } - if len(ts) > 1 { - return nil, fmt.Errorf("multiple types for %s", k) + if len(terraformTypes) > 1 { + return nil, fmt.Errorf("multiple types for %s", propertyName) } - t, at := ts[0], ats[0] + terraformType, aivenType := terraformTypes[0], aivenTypes[0] - _, ireq := req[k] + _, isRequired := requiredProperties[propertyName] - var s map[string]*jen.Statement + var schemaStatements map[string]*jen.Statement - if isTerraformTypePrimitive(t) { - s = handlePrimitiveTypeProperty(k, va, t, ireq) + if isTerraformTypePrimitive(terraformType) { + schemaStatements = handlePrimitiveTypeProperty(propertyName, propertyAttributes, terraformType, isRequired) } else { - s, err = handleAggregateTypeProperty(k, va, t, at) + schemaStatements, err = handleAggregateTypeProperty( + propertyName, propertyAttributes, terraformType, aivenType, + ) if err != nil { return nil, err } } - if s == nil { + if schemaStatements == nil { continue } - for kn, vn := range s { - r[jen.Lit(EncodeKey(kn))] = vn + for keyName, valueNode := range schemaStatements { + resultDict[jen.Lit(EncodeKey(keyName))] = valueNode } } - return r, nil + return resultDict, nil } diff --git 
a/internal/schemautil/userconfig/desc.go b/internal/schemautil/userconfig/desc.go index f9dc16fd1..b1633d7da 100644 --- a/internal/schemautil/userconfig/desc.go +++ b/internal/schemautil/userconfig/desc.go @@ -7,29 +7,14 @@ import ( // DescriptionBuilder is a helper to build complex descriptions in a consistent way. type DescriptionBuilder struct { - // base is the base description. - base string - - // withForcedFirstLetterCapitalization is a flag that indicates if the first letter should be capitalized. + base string withForcedFirstLetterCapitalization bool - - // withPossibleValues is a flag that indicates if the possible values should be included in the description. - withPossibleValues []interface{} - - // withRequiredWith is a flag that indicates if the required with should be included in the description. - withRequiredWith []string - - // withMaxLen is a flag that indicates if the max length should be included in the description. - withMaxLen int - - // withDefaultValue is a flag that indicates if the default value should be included in the description. - withDefaultValue interface{} - - // withUseReference is a flag that indicates if the use reference should be included in the description. - withUseReference bool - - // withForceNew is a flag that indicates if the force new should be included in the description. - withForceNew bool + withPossibleValues []any + withRequiredWith []string + withMaxLen int + withDefaultValue any + withUseReference bool + withForceNew bool } // Desc is a function that creates a new DescriptionBuilder. @@ -44,26 +29,26 @@ func (db *DescriptionBuilder) ForceFirstLetterCapitalization() *DescriptionBuild } // PossibleValues is a function that sets the withPossibleValues flag. 
-func (db *DescriptionBuilder) PossibleValues(vv ...interface{}) *DescriptionBuilder { - db.withPossibleValues = vv +func (db *DescriptionBuilder) PossibleValues(values ...any) *DescriptionBuilder { + db.withPossibleValues = values return db } // RequiredWith is a function that sets the withRequiredWith flag. -func (db *DescriptionBuilder) RequiredWith(sv ...string) *DescriptionBuilder { - db.withRequiredWith = sv +func (db *DescriptionBuilder) RequiredWith(values ...string) *DescriptionBuilder { + db.withRequiredWith = values return db } // MaxLen is a function that sets the withMaxLen flag. -func (db *DescriptionBuilder) MaxLen(i int) *DescriptionBuilder { - db.withMaxLen = i +func (db *DescriptionBuilder) MaxLen(length int) *DescriptionBuilder { + db.withMaxLen = length return db } // DefaultValue is a function that sets the withDefaultValue flag. -func (db *DescriptionBuilder) DefaultValue(v interface{}) *DescriptionBuilder { - db.withDefaultValue = v +func (db *DescriptionBuilder) DefaultValue(value any) *DescriptionBuilder { + db.withDefaultValue = value return db } @@ -81,85 +66,72 @@ func (db *DescriptionBuilder) ForceNew() *DescriptionBuilder { // Build is a function that builds the description. func (db *DescriptionBuilder) Build() string { - b := new(strings.Builder) + builder := new(strings.Builder) // Capitalize the first letter, if needed. if db.withForcedFirstLetterCapitalization { - b.WriteRune(rune(strings.ToUpper(string(db.base[0]))[0])) - - b.WriteString(db.base[1:]) + builder.WriteRune(rune(strings.ToUpper(string(db.base[0]))[0])) + builder.WriteString(db.base[1:]) } else { - b.WriteString(db.base) + builder.WriteString(db.base) } // Add a trailing dot if it's missing. 
if !strings.HasSuffix(db.base, ".") { - b.WriteString(".") + builder.WriteString(".") } if db.withPossibleValues != nil { - b.WriteRune(' ') - - b.WriteString("The possible values are ") - - for i := range db.withPossibleValues { + builder.WriteRune(' ') + builder.WriteString("The possible values are ") + for i, value := range db.withPossibleValues { if i > 0 { if i == len(db.withPossibleValues)-1 { - b.WriteString(" and ") + builder.WriteString(" and ") } else { - b.WriteString(", ") + builder.WriteString(", ") } } - - b.WriteString(fmt.Sprintf("`%v`", db.withPossibleValues[i])) + builder.WriteString(fmt.Sprintf("`%v`", value)) } - - b.WriteRune('.') + builder.WriteRune('.') } if db.withRequiredWith != nil { - b.WriteRune(' ') - - b.WriteString("The field is required with") - - for i := range db.withRequiredWith { + builder.WriteRune(' ') + builder.WriteString("The field is required with") + for i, value := range db.withRequiredWith { if i > 0 { if i == len(db.withRequiredWith)-1 { - b.WriteString(" and ") + builder.WriteString(" and ") } else { - b.WriteString(", ") + builder.WriteString(", ") } } - - b.WriteString(fmt.Sprintf("`%v`", db.withRequiredWith[i])) + builder.WriteString(fmt.Sprintf("`%v`", value)) } - - b.WriteRune('.') + builder.WriteRune('.') } if db.withMaxLen > 0 { - b.WriteRune(' ') - - b.WriteString(fmt.Sprintf("Maximum length: `%v`.", db.withMaxLen)) + builder.WriteRune(' ') + builder.WriteString(fmt.Sprintf("Maximum length: `%v`.", db.withMaxLen)) } if db.withDefaultValue != nil { - b.WriteRune(' ') - - b.WriteString(fmt.Sprintf("The default value is `%v`.", db.withDefaultValue)) + builder.WriteRune(' ') + builder.WriteString(fmt.Sprintf("The default value is `%v`.", db.withDefaultValue)) } if db.withUseReference { - b.WriteRune(' ') - - b.WriteString("To set up proper dependencies please refer to this variable as a reference.") + builder.WriteRune(' ') + builder.WriteString("To set up proper dependencies please refer to this variable as a 
reference.") } if db.withForceNew { - b.WriteRune(' ') - - b.WriteString("This property cannot be changed, doing so forces recreation of the resource.") + builder.WriteRune(' ') + builder.WriteString("This property cannot be changed, doing so forces recreation of the resource.") } - return b.String() + return builder.String() } diff --git a/internal/schemautil/userconfig/dist/integration_endpoint_types.go b/internal/schemautil/userconfig/dist/integration_endpoint_types.go index 55989ada9..4369fec7f 100644 --- a/internal/schemautil/userconfig/dist/integration_endpoint_types.go +++ b/internal/schemautil/userconfig/dist/integration_endpoint_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationEndpointTypeDatadog is a generated function returning the schema of the datadog IntegrationEndpointType. diff --git a/internal/schemautil/userconfig/dist/integration_types.go b/internal/schemautil/userconfig/dist/integration_types.go index 66b10740f..de191b448 100644 --- a/internal/schemautil/userconfig/dist/integration_types.go +++ b/internal/schemautil/userconfig/dist/integration_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationTypeClickhouseKafka is a generated function returning the schema of the clickhouse_kafka IntegrationType. 
diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index 2945edc6e..95cb0d759 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // ServiceTypeCassandra is a generated function returning the schema of the cassandra ServiceType. diff --git a/internal/schemautil/userconfig/handle.go b/internal/schemautil/userconfig/handle.go index 63fea1721..90ab028b7 100644 --- a/internal/schemautil/userconfig/handle.go +++ b/internal/schemautil/userconfig/handle.go @@ -9,68 +9,81 @@ import ( ) // handlePrimitiveTypeProperty is a function that converts a primitive type property to a Terraform schema. -func handlePrimitiveTypeProperty(n string, p map[string]interface{}, t string, ireq bool) map[string]*jen.Statement { - return map[string]*jen.Statement{n: jen.Values(convertPropertyToSchema(n, p, t, true, ireq))} +func handlePrimitiveTypeProperty( + name string, + property map[string]any, + typeStr string, + isRequired bool, +) map[string]*jen.Statement { + return map[string]*jen.Statement{ + name: jen.Values(convertPropertyToSchema(name, property, typeStr, true, isRequired)), + } } -// handleObjectProperty is a function that converts an object type property to a Terraform schema. +// handleObjectProperty converts an object type property to a Terraform schema. 
func handleObjectProperty( - n string, - p map[string]interface{}, - t string, - req map[string]struct{}, + objectName string, + propertyMap map[string]any, + typeString string, + requiredProperties map[string]struct{}, ) (map[string]*jen.Statement, error) { - pa, ok := p["properties"].(map[string]interface{}) - if !ok { - it, ok := p["items"].(map[string]interface{}) - if ok { - pa, ok = it["properties"].(map[string]interface{}) + properties, propertiesExist := propertyMap["properties"].(map[string]any) + if !propertiesExist { + itemsAt, itemsExist := propertyMap["items"].(map[string]any) + if itemsExist { + properties, propertiesExist = itemsAt["properties"].(map[string]any) } - if !ok { - return nil, fmt.Errorf("unable to get properties field: %#v", p) + if !propertiesExist { + return nil, fmt.Errorf("unable to get properties field: %#v", propertyMap) } } - r := convertPropertyToSchema(n, p, t, true, false) + resourceStatements := convertPropertyToSchema( + objectName, propertyMap, typeString, true, false, + ) - pc, err := convertPropertiesToSchemaMap(pa, req) + schemaMapAt, err := convertPropertiesToSchemaMap(properties, requiredProperties) if err != nil { return nil, err } - s := jen.Map(jen.String()).Op("*").Qual(SchemaPackage, "Schema").Values(pc) + schemaValues := jen.Map(jen.String()).Op("*").Qual(SchemaPackage, "Schema").Values(schemaMapAt) - r[jen.Id("Elem")] = jen.Op("&").Qual(SchemaPackage, "Resource").Values(jen.Dict{ - jen.Id("Schema"): s, + resourceStatements[jen.Id("Elem")] = jen.Op("&").Qual(SchemaPackage, "Resource").Values(jen.Dict{ + jen.Id("Schema"): schemaValues, }) // TODO: Check if we can access the schema via diff suppression function. 
- r[jen.Id("DiffSuppressFunc")] = jen.Qual(SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays").Call(s) + resourceStatements[jen.Id("DiffSuppressFunc")] = jen.Qual( + SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays", + ).Call(schemaValues) - r[jen.Id("MaxItems")] = jen.Lit(1) + resourceStatements[jen.Id("MaxItems")] = jen.Lit(1) - return map[string]*jen.Statement{n: jen.Values(r)}, nil + return map[string]*jen.Statement{objectName: jen.Values(resourceStatements)}, nil } // handleArrayOfPrimitiveTypeProperty is a function that converts an array of primitive type property to a Terraform // schema. -func handleArrayOfPrimitiveTypeProperty(n string, t string) *jen.Statement { - r := jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, t), +func handleArrayOfPrimitiveTypeProperty(propertyName string, terraformType string) *jen.Statement { + propertyAttributes := jen.Dict{ + jen.Id("Type"): jen.Qual(SchemaPackage, terraformType), } - if n == "ip_filter" { + if propertyName == "ip_filter" { // TODO: Add ip_filter_object to this sanity check when DiffSuppressFunc is implemented for it. - r[jen.Id("DiffSuppressFunc")] = jen.Qual(SchemaUtilPackage, "IPFilterValueDiffSuppressFunc") + propertyAttributes[jen.Id("DiffSuppressFunc")] = jen.Qual( + SchemaUtilPackage, "IPFilterValueDiffSuppressFunc", + ) } - return jen.Op("&").Qual(SchemaPackage, "Schema").Values(r) + return jen.Op("&").Qual(SchemaPackage, "Schema").Values(propertyAttributes) } // handleArrayOfAggregateTypeProperty is a function that converts an array of aggregate type property to a Terraform // schema. 
-func handleArrayOfAggregateTypeProperty(ip map[string]interface{}, req map[string]struct{}) (*jen.Statement, error) { +func handleArrayOfAggregateTypeProperty(ip map[string]any, req map[string]struct{}) (*jen.Statement, error) { pc, err := convertPropertiesToSchemaMap(ip, req) if err != nil { return nil, err @@ -83,190 +96,198 @@ func handleArrayOfAggregateTypeProperty(ip map[string]interface{}, req map[strin // handleArrayProperty is a function that converts an array type property to a Terraform schema. func handleArrayProperty( - n string, - p map[string]interface{}, - t string, + propertyName string, + propertyMap map[string]any, + terraformType string, ) (map[string]*jen.Statement, error) { - ia, ok := p["items"].(map[string]interface{}) + itemAttributes, ok := propertyMap["items"].(map[string]any) if !ok { - return nil, fmt.Errorf("items is not a map[string]interface{}: %#v", p) + return nil, fmt.Errorf("items is not a map[string]any: %#v", propertyMap) } - var e *jen.Statement + var element *jen.Statement - var tn, atn []string + var terraformNames, aivenTypeNames []string var err error - oos, iof := ia["one_of"].([]interface{}) - if iof { - var ct []string + oneOfOptions, isOneOf := itemAttributes["one_of"].([]any) + if isOneOf { + var complexTypes []string - for _, v := range oos { - va, ok := v.(map[string]interface{}) + for _, v := range oneOfOptions { + oneOfMap, ok := v.(map[string]any) if !ok { - return nil, fmt.Errorf("one_of element is not a map[string]interface{}: %#v", v) + return nil, fmt.Errorf("one_of element is not a map[string]any: %#v", v) } - ct = append(ct, va["type"].(string)) + complexTypes = append(complexTypes, oneOfMap["type"].(string)) } - tn, atn, err = TerraformTypes(ct) + terraformNames, aivenTypeNames, err = TerraformTypes(complexTypes) if err != nil { return nil, err } } else { - tn, atn, err = TerraformTypes(SlicedString(ia["type"])) + terraformNames, aivenTypeNames, err = TerraformTypes(SlicedString(itemAttributes["type"])) if 
err != nil { return nil, err } } - r := make(map[string]*jen.Statement) + result := make(map[string]*jen.Statement) - for k, v := range tn { - an := n + for k, terraformName := range terraformNames { + adjustedName := propertyName - if len(tn) > 1 { - an = fmt.Sprintf("%s_%s", n, atn[k]) + if len(terraformNames) > 1 { + adjustedName = fmt.Sprintf("%s_%s", propertyName, aivenTypeNames[k]) // TODO: Remove with the next major version. - if an == "ip_filter_string" { - an = "ip_filter" + if adjustedName == "ip_filter_string" { + adjustedName = "ip_filter" } // TODO: Remove with the next major version. - if an == "namespaces_string" { - an = "namespaces" + if adjustedName == "namespaces_string" { + adjustedName = "namespaces" } } - var ooia map[string]interface{} + var oneOfItemAttributes map[string]any - if iof { - ooia, ok = oos[k].(map[string]interface{}) + if isOneOf { + oneOfItemAttributes, ok = oneOfOptions[k].(map[string]any) if !ok { - return nil, fmt.Errorf("unable to convert one_of item to map[string]interface{}: %#v", oos[k]) + return nil, + fmt.Errorf("unable to convert one_of item to map[string]any: %#v", oneOfOptions[k]) } } - if isTerraformTypePrimitive(v) { - e = handleArrayOfPrimitiveTypeProperty(an, v) + if isTerraformTypePrimitive(terraformName) { + element = handleArrayOfPrimitiveTypeProperty(adjustedName, terraformName) } else { - var ipa map[string]interface{} + var itemProperties map[string]any - if iof { - ipa, ok = ooia["properties"].(map[string]interface{}) + if isOneOf { + itemProperties, ok = oneOfItemAttributes["properties"].(map[string]any) if !ok { return nil, fmt.Errorf( - "unable to convert one_of item properties to map[string]interface{}: %#v", - ooia, + "unable to convert one_of item properties to map[string]any: %#v", + oneOfItemAttributes, ) } } else { - ipa, ok = ia["properties"].(map[string]interface{}) + itemProperties, ok = itemAttributes["properties"].(map[string]any) if !ok { - return nil, fmt.Errorf("could not find properties 
in an array of aggregate type: %#v", p) + return nil, + fmt.Errorf("could not find properties in an array of aggregate type: %#v", propertyMap) } } - req := map[string]struct{}{} + requiredProperties := map[string]struct{}{} - if sreq, ok := ia["required"].([]interface{}); ok { - req = SliceToKeyedMap(sreq) + if requiredItems, ok := itemAttributes["required"].([]any); ok { + requiredProperties = SliceToKeyedMap(requiredItems) } - e, err = handleArrayOfAggregateTypeProperty(ipa, req) + element, err = handleArrayOfAggregateTypeProperty(itemProperties, requiredProperties) if err != nil { return nil, err } } - s := convertPropertyToSchema(n, p, t, !iof, false) + schema := convertPropertyToSchema(propertyName, propertyMap, terraformType, !isOneOf, false) - if iof { - ooiat, ok := ooia["type"].(string) + if isOneOf { + oneOfType, ok := oneOfItemAttributes["type"].(string) if !ok { - return nil, fmt.Errorf("one_of item type is not a string: %#v", ooia) + return nil, fmt.Errorf("one_of item type is not a string: %#v", oneOfItemAttributes) } - _, dpv := descriptionForProperty(p, t) + _, defaultPropertyDescription := descriptionForProperty(propertyMap, terraformType) - dooiid, dooid := descriptionForProperty(ooia, ooiat) + deprecationIndicator, oneOfItemDescription := descriptionForProperty(oneOfItemAttributes, oneOfType) - s[jen.Id("Description")] = jen.Lit(fmt.Sprintf("%s %s", dpv, dooid)) + schema[jen.Id("Description")] = jen.Lit( + fmt.Sprintf("%s %s", defaultPropertyDescription, oneOfItemDescription), + ) - if dooiid { - s[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") + if deprecationIndicator { + schema[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") } } - s[jen.Id("Elem")] = e + schema[jen.Id("Elem")] = element - if an == "ip_filter" { + if adjustedName == "ip_filter" { // TODO: Add ip_filter_object to this sanity check when DiffSuppressFunc is implemented for it. 
- s[jen.Id("DiffSuppressFunc")] = jen.Qual(SchemaUtilPackage, "IPFilterArrayDiffSuppressFunc") + schema[jen.Id("DiffSuppressFunc")] = jen.Qual( + SchemaUtilPackage, "IPFilterArrayDiffSuppressFunc", + ) } - if mi, ok := p["max_items"].(int); ok { - s[jen.Id("MaxItems")] = jen.Lit(mi) + if maxItems, ok := propertyMap["max_items"].(int); ok { + schema[jen.Id("MaxItems")] = jen.Lit(maxItems) } - os := jen.Dict{} - for k, v := range s { - os[k] = v + orderedSchema := jen.Dict{} + for key, value := range schema { + orderedSchema[key] = value } // TODO: Remove with the next major version. - if an == "ip_filter" || (iof && an == "namespaces") { - s[jen.Id("Deprecated")] = jen.Lit( - fmt.Sprintf("This will be removed in v5.0.0 and replaced with %s_string instead.", an), + if adjustedName == "ip_filter" || (isOneOf && adjustedName == "namespaces") { + schema[jen.Id("Deprecated")] = jen.Lit( + fmt.Sprintf("This will be removed in v5.0.0 and replaced with %s_string instead.", adjustedName), ) } - r[an] = jen.Values(s) + result[adjustedName] = jen.Values(schema) - if an == "ip_filter" || (iof && an == "namespaces") { - r[fmt.Sprintf("%s_string", an)] = jen.Values(os) + if adjustedName == "ip_filter" || (isOneOf && adjustedName == "namespaces") { + result[fmt.Sprintf("%s_string", adjustedName)] = jen.Values(orderedSchema) } } - return r, nil + return result, nil } // handleAggregateTypeProperty is a function that converts an aggregate type property to a Terraform schema. 
func handleAggregateTypeProperty( - n string, - p map[string]interface{}, - t string, - at string, + propertyName string, + propertyAttributes map[string]any, + terraformType string, + aivenType string, ) (map[string]*jen.Statement, error) { - r := make(map[string]*jen.Statement) + resultStatements := make(map[string]*jen.Statement) - req := map[string]struct{}{} + requiredProperties := map[string]struct{}{} - if sreq, ok := p["required"].([]interface{}); ok { - req = SliceToKeyedMap(sreq) + if requiredSlice, ok := propertyAttributes["required"].([]any); ok { + requiredProperties = SliceToKeyedMap(requiredSlice) } - switch at { + switch aivenType { case "object": - v, err := handleObjectProperty(n, p, t, req) + objectStatements, err := handleObjectProperty( + propertyName, propertyAttributes, terraformType, requiredProperties, + ) if err != nil { return nil, err } - maps.Copy(r, v) + maps.Copy(resultStatements, objectStatements) case "array": - v, err := handleArrayProperty(n, p, t) + arrayStatements, err := handleArrayProperty(propertyName, propertyAttributes, terraformType) if err != nil { return nil, err } - maps.Copy(r, v) + maps.Copy(resultStatements, arrayStatements) default: - return nil, fmt.Errorf("unknown aggregate type: %s", at) + return nil, fmt.Errorf("unknown aggregate type: %s", aivenType) } - return r, nil + return resultStatements, nil } diff --git a/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader.go b/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader.go index 50485c88f..748980200 100644 --- a/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader.go +++ b/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader.go @@ -6,14 +6,14 @@ import ( ) // Map upgrades map values to the specified types. 
-func Map(m map[string]interface{}, rules map[string]string) (err error) { - for k, t := range rules { - va, ok := m[k].(string) +func Map(valueMap map[string]any, typeRules map[string]string) (err error) { + for key, targetType := range typeRules { + valueAsString, ok := valueMap[key].(string) if !ok { continue } - m[k], err = convert(va, t) + valueMap[key], err = convert(valueAsString, targetType) if err != nil { return err } @@ -23,14 +23,14 @@ func Map(m map[string]interface{}, rules map[string]string) (err error) { } // Slice upgrades slice values to the specified type. -func Slice(s []interface{}, t string) (err error) { - for i, v := range s { - va, ok := v.(string) +func Slice(valueSlice []any, targetType string) (err error) { + for index, value := range valueSlice { + valueAsString, ok := value.(string) if !ok { continue } - s[i], err = convert(va, t) + valueSlice[index], err = convert(valueAsString, targetType) if err != nil { return err } @@ -40,27 +40,27 @@ func Slice(s []interface{}, t string) (err error) { } // convert converts a value to the specified type. 
-func convert(v string, t string) (res interface{}, err error) { - switch t { +func convert(value string, targetType string) (convertedValue any, err error) { + switch targetType { case "bool": - if v == "" { - v = "false" + if value == "" { + value = "false" } - return strconv.ParseBool(v) + return strconv.ParseBool(value) case "int": - if v == "" { - v = "0" + if value == "" { + value = "0" } - return strconv.Atoi(v) + return strconv.Atoi(value) case "float": - if v == "" { - v = "0" + if value == "" { + value = "0" } - return strconv.ParseFloat(v, 64) + return strconv.ParseFloat(value, 64) default: - return nil, fmt.Errorf("unsupported type %q", t) + return nil, fmt.Errorf("unsupported type %q", targetType) } } diff --git a/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader_test.go b/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader_test.go index 7c3c0ef02..eb25357c7 100644 --- a/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader_test.go +++ b/internal/schemautil/userconfig/stateupgrader/typeupgrader/typeupgrader_test.go @@ -9,26 +9,26 @@ import ( // TestMap is a test for Map. 
func TestMap(t *testing.T) { type args struct { - m map[string]interface{} - rules map[string]string + mapInput map[string]any + typeRules map[string]string } tests := []struct { name string args args - want map[string]interface{} + want map[string]any wantErr bool }{ { name: "basic", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", }, - rules: map[string]string{}, + typeRules: map[string]string{}, }, - want: map[string]interface{}{ + want: map[string]any{ "bool": "true", "int": "1", }, @@ -36,15 +36,15 @@ func TestMap(t *testing.T) { { name: "bool", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", }, - rules: map[string]string{ + typeRules: map[string]string{ "bool": "bool", }, }, - want: map[string]interface{}{ + want: map[string]any{ "bool": true, "int": "1", }, @@ -52,15 +52,15 @@ func TestMap(t *testing.T) { { name: "int", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", }, - rules: map[string]string{ + typeRules: map[string]string{ "int": "int", }, }, - want: map[string]interface{}{ + want: map[string]any{ "bool": "true", "int": 1, }, @@ -68,16 +68,16 @@ func TestMap(t *testing.T) { { name: "bool and int", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", }, - rules: map[string]string{ + typeRules: map[string]string{ "bool": "bool", "int": "int", }, }, - want: map[string]interface{}{ + want: map[string]any{ "bool": true, "int": 1, }, @@ -85,26 +85,26 @@ func TestMap(t *testing.T) { { name: "complex map", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", - "map": []interface{}{ - map[string]interface{}{ + "map": []any{ + map[string]any{ "bool": "true", "int": "1", }, }, }, - rules: map[string]string{ + typeRules: map[string]string{ "bool": "bool", "int": "int", }, }, - want: map[string]interface{}{ + want: map[string]any{ 
"bool": true, "int": 1, - "map": []interface{}{ - map[string]interface{}{ + "map": []any{ + map[string]any{ "bool": "true", "int": "1", }, @@ -114,11 +114,11 @@ func TestMap(t *testing.T) { { name: "bool and int with error", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "foo", }, - rules: map[string]string{ + typeRules: map[string]string{ "bool": "bool", "int": "int", }, @@ -128,11 +128,11 @@ func TestMap(t *testing.T) { { name: "unknown type", args: args{ - m: map[string]interface{}{ + mapInput: map[string]any{ "bool": "true", "int": "1", }, - rules: map[string]string{ + typeRules: map[string]string{ "bool": "bool", "int": "foo", }, @@ -143,15 +143,15 @@ func TestMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := Map(tt.args.m, tt.args.rules) + err := Map(tt.args.mapInput, tt.args.typeRules) if (err != nil) != tt.wantErr { t.Errorf("UpgradeMap() error = %v, wantErr %v", err, tt.wantErr) return } - if !tt.wantErr && !cmp.Equal(tt.args.m, tt.want) { - t.Errorf(cmp.Diff(tt.want, tt.args.m)) + if !tt.wantErr && !cmp.Equal(tt.args.mapInput, tt.want) { + t.Errorf(cmp.Diff(tt.want, tt.args.mapInput)) } }) } @@ -160,37 +160,37 @@ func TestMap(t *testing.T) { // TestSlice is a test for Slice. 
func TestSlice(t *testing.T) { type args struct { - s []interface{} - t string + sliceInput []any + typeInput string } tests := []struct { name string args args - want []interface{} + want []any wantErr bool }{ { name: "int", args: args{ - s: []interface{}{"1", "3", "3", "7"}, - t: "int", + sliceInput: []any{"1", "3", "3", "7"}, + typeInput: "int", }, - want: []interface{}{1, 3, 3, 7}, + want: []any{1, 3, 3, 7}, }, { name: "int with error", args: args{ - s: []interface{}{"1", "foo", "3", "7"}, - t: "int", + sliceInput: []any{"1", "foo", "3", "7"}, + typeInput: "int", }, wantErr: true, }, { name: "unknown type", args: args{ - s: []interface{}{"1", "foo", "3", "7"}, - t: "foo", + sliceInput: []any{"1", "foo", "3", "7"}, + typeInput: "foo", }, wantErr: true, }, @@ -198,15 +198,15 @@ func TestSlice(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := Slice(tt.args.s, tt.args.t) + err := Slice(tt.args.sliceInput, tt.args.typeInput) if (err != nil) != tt.wantErr { t.Errorf("UpgradeSlice() error = %v, wantErr %v", err, tt.wantErr) return } - if !tt.wantErr && !cmp.Equal(tt.args.s, tt.want) { - t.Errorf(cmp.Diff(tt.want, tt.args.s)) + if !tt.wantErr && !cmp.Equal(tt.args.sliceInput, tt.want) { + t.Errorf(cmp.Diff(tt.want, tt.args.sliceInput)) } }) } diff --git a/internal/schemautil/userconfig/stateupgrader/v0/cassandra/cassandra.go b/internal/schemautil/userconfig/stateupgrader/v0/cassandra/cassandra.go index 7657b1cd8..c39ef7606 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/cassandra/cassandra.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/cassandra/cassandra.go @@ -36,7 +36,6 @@ func ResourceCassandra() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeCassandra), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, 
schemautil.CustomizeDiffCheckUniqueTag, @@ -73,10 +72,10 @@ func ResourceCassandra() *schema.Resource { func ResourceCassandraStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["cassandra_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["cassandra_user_config"].([]any) if !ok { return rawState, nil } @@ -85,7 +84,7 @@ func ResourceCassandraStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -98,9 +97,9 @@ func ResourceCassandraStateUpgrade( return rawState, err } - cassandraSlice, ok := userConfig["cassandra"].([]interface{}) + cassandraSlice, ok := userConfig["cassandra"].([]any) if ok && len(cassandraSlice) > 0 { - cassandra, ok := cassandraSlice[0].(map[string]interface{}) + cassandra, ok := cassandraSlice[0].(map[string]any) if ok { err := typeupgrader.Map(cassandra, map[string]string{ "batch_size_fail_threshold_in_kb": "int", @@ -112,9 +111,9 @@ func ResourceCassandraStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "prometheus": "bool", @@ -125,9 +124,9 @@ func ResourceCassandraStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := 
typeupgrader.Map(publicAccess, map[string]string{ "prometheus": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/flink/flink.go b/internal/schemautil/userconfig/stateupgrader/v0/flink/flink.go index 8a9008695..6b6559c80 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/flink/flink.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/flink/flink.go @@ -85,10 +85,10 @@ func ResourceFlink() *schema.Resource { func ResourceFlinkStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["flink_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["flink_user_config"].([]any) if !ok { return rawState, nil } @@ -97,7 +97,7 @@ func ResourceFlinkStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -115,9 +115,9 @@ func ResourceFlinkStateUpgrade( return rawState, err } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "flink": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/grafana/grafana.go b/internal/schemautil/userconfig/stateupgrader/v0/grafana/grafana.go index 0639b399c..2f7cb30b8 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/grafana/grafana.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/grafana/grafana.go @@ -71,10 +71,10 @@ func ResourceGrafana() *schema.Resource { func ResourceGrafanaStateUpgrade( _ context.Context, - 
rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["grafana_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["grafana_user_config"].([]any) if !ok { return rawState, nil } @@ -83,7 +83,7 @@ func ResourceGrafanaStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -108,9 +108,9 @@ func ResourceGrafanaStateUpgrade( return rawState, err } - authAzureADSlice, ok := userConfig["auth_azuread"].([]interface{}) + authAzureADSlice, ok := userConfig["auth_azuread"].([]any) if ok && len(authAzureADSlice) > 0 { - authAzureAD, ok := authAzureADSlice[0].(map[string]interface{}) + authAzureAD, ok := authAzureADSlice[0].(map[string]any) if ok { err = typeupgrader.Map(authAzureAD, map[string]string{ "allow_sign_up": "bool", @@ -121,9 +121,9 @@ func ResourceGrafanaStateUpgrade( } } - authGenericOAuthSlice, ok := userConfig["auth_generic_oauth"].([]interface{}) + authGenericOAuthSlice, ok := userConfig["auth_generic_oauth"].([]any) if ok && len(authGenericOAuthSlice) > 0 { - authGenericOAuth, ok := authGenericOAuthSlice[0].(map[string]interface{}) + authGenericOAuth, ok := authGenericOAuthSlice[0].(map[string]any) if ok { err = typeupgrader.Map(authGenericOAuth, map[string]string{ "allow_sign_up": "bool", @@ -134,9 +134,9 @@ func ResourceGrafanaStateUpgrade( } } - authGitHubSlice, ok := userConfig["auth_github"].([]interface{}) + authGitHubSlice, ok := userConfig["auth_github"].([]any) if ok && len(authGitHubSlice) > 0 { - authGitHub, ok := authGitHubSlice[0].(map[string]interface{}) + authGitHub, ok := authGitHubSlice[0].(map[string]any) if ok { err = typeupgrader.Map(authGitHub, map[string]string{ "allow_sign_up": "bool", @@ -145,7 +145,7 @@ func ResourceGrafanaStateUpgrade( return rawState, err } - 
authGitHubTeamIDs, ok := authGitHub["team_ids"].([]interface{}) + authGitHubTeamIDs, ok := authGitHub["team_ids"].([]any) if ok { err = typeupgrader.Slice(authGitHubTeamIDs, "int") if err != nil { @@ -155,9 +155,9 @@ func ResourceGrafanaStateUpgrade( } } - authGitLabSlice, ok := userConfig["auth_gitlab"].([]interface{}) + authGitLabSlice, ok := userConfig["auth_gitlab"].([]any) if ok && len(authGitLabSlice) > 0 { - authGitLab, ok := authGitLabSlice[0].(map[string]interface{}) + authGitLab, ok := authGitLabSlice[0].(map[string]any) if ok { err = typeupgrader.Map(authGitLab, map[string]string{ "allow_sign_up": "bool", @@ -168,9 +168,9 @@ func ResourceGrafanaStateUpgrade( } } - authGoogleSlice, ok := userConfig["auth_google"].([]interface{}) + authGoogleSlice, ok := userConfig["auth_google"].([]any) if ok && len(authGoogleSlice) > 0 { - authGoogle, ok := authGoogleSlice[0].(map[string]interface{}) + authGoogle, ok := authGoogleSlice[0].(map[string]any) if ok { err = typeupgrader.Map(authGoogle, map[string]string{ "allow_sign_up": "bool", @@ -181,9 +181,9 @@ func ResourceGrafanaStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "grafana": "bool", @@ -194,9 +194,9 @@ func ResourceGrafanaStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "grafana": "bool", @@ 
-207,9 +207,9 @@ func ResourceGrafanaStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "grafana": "bool", @@ -220,9 +220,9 @@ func ResourceGrafanaStateUpgrade( } } - smtpServerSlice, ok := userConfig["smtp_server"].([]interface{}) + smtpServerSlice, ok := userConfig["smtp_server"].([]any) if ok && len(smtpServerSlice) > 0 { - smtpServer, ok := smtpServerSlice[0].(map[string]interface{}) + smtpServer, ok := smtpServerSlice[0].(map[string]any) if ok { err := typeupgrader.Map(smtpServer, map[string]string{ "port": "int", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/influxdb/influxdb.go b/internal/schemautil/userconfig/stateupgrader/v0/influxdb/influxdb.go index 636b2a8ae..5314443f0 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/influxdb/influxdb.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/influxdb/influxdb.go @@ -79,10 +79,10 @@ func ResourceInfluxDB() *schema.Resource { func ResourceInfluxDBStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["influxdb_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["influxdb_user_config"].([]any) if !ok { return rawState, nil } @@ -91,7 +91,7 @@ func ResourceInfluxDBStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -103,9 +103,9 @@ func ResourceInfluxDBStateUpgrade( return rawState, err } - influxDBSlice, ok := 
userConfig["influxdb"].([]interface{}) + influxDBSlice, ok := userConfig["influxdb"].([]any) if ok && len(influxDBSlice) > 0 { - influxDB, ok := influxDBSlice[0].(map[string]interface{}) + influxDB, ok := influxDBSlice[0].(map[string]any) if ok { err := typeupgrader.Map(influxDB, map[string]string{ "log_queries_after": "int", @@ -121,9 +121,9 @@ func ResourceInfluxDBStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "influxdb": "bool", @@ -134,9 +134,9 @@ func ResourceInfluxDBStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "influxdb": "bool", @@ -147,9 +147,9 @@ func ResourceInfluxDBStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "influxdb": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka.go b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka.go index bfeea13f6..decdcd9b1 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka.go +++ 
b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka.go @@ -119,7 +119,7 @@ func ResourceKafka() *schema.Resource { ), // if a kafka_version is >= 3.0 then this schema field is not applicable - customdiff.ComputedIf("karapace", func(ctx context.Context, d *schema.ResourceDiff, m interface{}) bool { + customdiff.ComputedIf("karapace", func(ctx context.Context, d *schema.ResourceDiff, m any) bool { project := d.Get("project").(string) serviceName := d.Get("service_name").(string) client := m.(*aiven.Client) @@ -145,10 +145,10 @@ func ResourceKafka() *schema.Resource { func ResourceKafkaStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["kafka_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["kafka_user_config"].([]any) if !ok { return rawState, nil } @@ -157,7 +157,7 @@ func ResourceKafkaStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -172,9 +172,9 @@ func ResourceKafkaStateUpgrade( return rawState, err } - kafkaSlice, ok := userConfig["kafka"].([]interface{}) + kafkaSlice, ok := userConfig["kafka"].([]any) if ok && len(kafkaSlice) > 0 { - kafka, ok := kafkaSlice[0].(map[string]interface{}) + kafka, ok := kafkaSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafka, map[string]string{ "auto_create_topics_enable": "bool", @@ -220,9 +220,9 @@ func ResourceKafkaStateUpgrade( } } - kafkaAuthenticationMethodsSlice, ok := userConfig["kafka_authentication_methods"].([]interface{}) + kafkaAuthenticationMethodsSlice, ok := userConfig["kafka_authentication_methods"].([]any) if ok && len(kafkaAuthenticationMethodsSlice) > 0 { - kafkaAuthenticationMethods, ok := kafkaAuthenticationMethodsSlice[0].(map[string]interface{}) + 
kafkaAuthenticationMethods, ok := kafkaAuthenticationMethodsSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafkaAuthenticationMethods, map[string]string{ "certificate": "bool", @@ -234,9 +234,9 @@ func ResourceKafkaStateUpgrade( } } - kafkaConnectConfigSlice, ok := userConfig["kafka_connect_config"].([]interface{}) + kafkaConnectConfigSlice, ok := userConfig["kafka_connect_config"].([]any) if ok && len(kafkaConnectConfigSlice) > 0 { - kafkaConnectConfig, ok := kafkaConnectConfigSlice[0].(map[string]interface{}) + kafkaConnectConfig, ok := kafkaConnectConfigSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafkaConnectConfig, map[string]string{ "consumer_fetch_max_bytes": "int", @@ -254,9 +254,9 @@ func ResourceKafkaStateUpgrade( } } - kafkaRestConfigSlice, ok := userConfig["kafka_rest_config"].([]interface{}) + kafkaRestConfigSlice, ok := userConfig["kafka_rest_config"].([]any) if ok && len(kafkaRestConfigSlice) > 0 { - kafkaRestConfig, ok := kafkaRestConfigSlice[0].(map[string]interface{}) + kafkaRestConfig, ok := kafkaRestConfigSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafkaRestConfig, map[string]string{ "consumer_enable_auto_commit": "bool", @@ -271,9 +271,9 @@ func ResourceKafkaStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "prometheus": "bool", @@ -284,9 +284,9 @@ func ResourceKafkaStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, 
ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "jolokia": "bool", @@ -302,9 +302,9 @@ func ResourceKafkaStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "kafka": "bool", @@ -319,9 +319,9 @@ func ResourceKafkaStateUpgrade( } } - schemaRegistryConfigSlice, ok := userConfig["schema_registry_config"].([]interface{}) + schemaRegistryConfigSlice, ok := userConfig["schema_registry_config"].([]any) if ok && len(schemaRegistryConfigSlice) > 0 { - schemaRegistryConfig, ok := schemaRegistryConfigSlice[0].(map[string]interface{}) + schemaRegistryConfig, ok := schemaRegistryConfigSlice[0].(map[string]any) if ok { err := typeupgrader.Map(schemaRegistryConfig, map[string]string{ "leader_eligibility": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_connect.go b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_connect.go index dc9a17cb9..bee7bda79 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_connect.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_connect.go @@ -69,10 +69,10 @@ func ResourceKafkaConnect() *schema.Resource { func ResourceKafkaConnectStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["kafka_connect_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["kafka_connect_user_config"].([]any) if !ok { return rawState, nil } @@ -81,7 +81,7 @@ func ResourceKafkaConnectStateUpgrade( return rawState, nil } - 
userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -93,9 +93,9 @@ func ResourceKafkaConnectStateUpgrade( return rawState, err } - kafkaConnectSlice, ok := userConfig["kafka_connect"].([]interface{}) + kafkaConnectSlice, ok := userConfig["kafka_connect"].([]any) if ok && len(kafkaConnectSlice) > 0 { - kafkaConnect, ok := kafkaConnectSlice[0].(map[string]interface{}) + kafkaConnect, ok := kafkaConnectSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafkaConnect, map[string]string{ "consumer_fetch_max_bytes": "int", @@ -113,9 +113,9 @@ func ResourceKafkaConnectStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "kafka_connect": "bool", @@ -127,9 +127,9 @@ func ResourceKafkaConnectStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "jolokia": "bool", @@ -142,9 +142,9 @@ func ResourceKafkaConnectStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, 
map[string]string{ "kafka_connect": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_mirrormaker.go b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_mirrormaker.go index f6aeec336..fb1b17f0a 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_mirrormaker.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_mirrormaker.go @@ -69,10 +69,10 @@ func ResourceKafkaMirrormaker() *schema.Resource { func ResourceKafkaMirrormakerStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["kafka_mirrormaker_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["kafka_mirrormaker_user_config"].([]any) if !ok { return rawState, nil } @@ -81,7 +81,7 @@ func ResourceKafkaMirrormakerStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -93,9 +93,9 @@ func ResourceKafkaMirrormakerStateUpgrade( return rawState, err } - kafkaMirrormakerSlice, ok := userConfig["kafka_mirrormaker"].([]interface{}) + kafkaMirrormakerSlice, ok := userConfig["kafka_mirrormaker"].([]any) if ok && len(kafkaMirrormakerSlice) > 0 { - kafkaMirrormaker, ok := kafkaMirrormakerSlice[0].(map[string]interface{}) + kafkaMirrormaker, ok := kafkaMirrormakerSlice[0].(map[string]any) if ok { err = typeupgrader.Map(kafkaMirrormaker, map[string]string{ "emit_checkpoints_enabled": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_topic.go b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_topic.go index 1fde5b1cd..2fa897bd3 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_topic.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/kafka/kafka_topic.go @@ -234,10 +234,10 
@@ func ResourceKafkaTopic() *schema.Resource { func ResourceKafkaTopicStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - configSlice, ok := rawState["config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + configSlice, ok := rawState["config"].([]any) if !ok { return rawState, nil } @@ -246,7 +246,7 @@ func ResourceKafkaTopicStateUpgrade( return rawState, nil } - config, ok := configSlice[0].(map[string]interface{}) + config, ok := configSlice[0].(map[string]any) if !ok { return rawState, nil } diff --git a/internal/schemautil/userconfig/stateupgrader/v0/m3/m3aggregator.go b/internal/schemautil/userconfig/stateupgrader/v0/m3/m3aggregator.go index feaadbf88..3c2202da7 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/m3/m3aggregator.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/m3/m3aggregator.go @@ -68,10 +68,10 @@ func ResourceM3Aggregator() *schema.Resource { func ResourceM3AggregatorStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["m3aggregator_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["m3aggregator_user_config"].([]any) if !ok { return rawState, nil } @@ -80,7 +80,7 @@ func ResourceM3AggregatorStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } diff --git a/internal/schemautil/userconfig/stateupgrader/v0/m3/m3db.go b/internal/schemautil/userconfig/stateupgrader/v0/m3/m3db.go index 667bed72a..e46619925 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/m3/m3db.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/m3/m3db.go @@ -36,7 +36,6 @@ func ResourceM3DBResource() *schema.Resource { 
DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeM3), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, @@ -73,10 +72,10 @@ func ResourceM3DBResource() *schema.Resource { func ResourceM3DBStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["m3db_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["m3db_user_config"].([]any) if !ok { return rawState, nil } @@ -85,7 +84,7 @@ func ResourceM3DBStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -98,9 +97,9 @@ func ResourceM3DBStateUpgrade( return rawState, err } - limitsSlice, ok := userConfig["limits"].([]interface{}) + limitsSlice, ok := userConfig["limits"].([]any) if ok && len(limitsSlice) > 0 { - limits, ok := limitsSlice[0].(map[string]interface{}) + limits, ok := limitsSlice[0].(map[string]any) if ok { err := typeupgrader.Map(limits, map[string]string{ "max_recently_queried_series_blocks": "int", @@ -115,17 +114,17 @@ func ResourceM3DBStateUpgrade( } } - namespacesSlice, ok := userConfig["namespaces"].([]interface{}) + namespacesSlice, ok := userConfig["namespaces"].([]any) if ok && len(namespacesSlice) > 0 { for _, v := range namespacesSlice { - namespace, ok := v.(map[string]interface{}) + namespace, ok := v.(map[string]any) if !ok { continue } - optionsSlice, ok := namespace["options"].([]interface{}) + optionsSlice, ok := namespace["options"].([]any) if ok && len(optionsSlice) > 0 { - options, ok := optionsSlice[0].(map[string]interface{}) + options, ok := optionsSlice[0].(map[string]any) if ok 
{ err := typeupgrader.Map(options, map[string]string{ "snapshot_enabled": "bool", @@ -139,9 +138,9 @@ func ResourceM3DBStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "m3coordinator": "bool", @@ -152,9 +151,9 @@ func ResourceM3DBStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "m3coordinator": "bool", @@ -165,14 +164,14 @@ func ResourceM3DBStateUpgrade( } } - rulesSlice, ok := userConfig["rules"].([]interface{}) + rulesSlice, ok := userConfig["rules"].([]any) if ok && len(rulesSlice) > 0 { - rules, ok := rulesSlice[0].(map[string]interface{}) + rules, ok := rulesSlice[0].(map[string]any) if ok { - mappingSlice, ok := rules["mapping"].([]interface{}) + mappingSlice, ok := rules["mapping"].([]any) if ok && len(mappingSlice) > 0 { for _, v := range mappingSlice { - mapping, ok := v.(map[string]interface{}) + mapping, ok := v.(map[string]any) if !ok { continue } diff --git a/internal/schemautil/userconfig/stateupgrader/v0/mysql/mysql.go b/internal/schemautil/userconfig/stateupgrader/v0/mysql/mysql.go index 31e264a6e..faf51c7f4 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/mysql/mysql.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/mysql/mysql.go @@ -73,10 +73,10 @@ func ResourceMySQLResource() *schema.Resource { func ResourceMySQLStateUpgrade( _ context.Context, - rawState 
map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["mysql_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["mysql_user_config"].([]any) if !ok { return rawState, nil } @@ -85,7 +85,7 @@ func ResourceMySQLStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -100,9 +100,9 @@ func ResourceMySQLStateUpgrade( return rawState, err } - migrationSlice, ok := userConfig["migration"].([]interface{}) + migrationSlice, ok := userConfig["migration"].([]any) if ok && len(migrationSlice) > 0 { - migration, ok := migrationSlice[0].(map[string]interface{}) + migration, ok := migrationSlice[0].(map[string]any) if ok { err = typeupgrader.Map(migration, map[string]string{ "port": "int", @@ -114,9 +114,9 @@ func ResourceMySQLStateUpgrade( } } - mysqlSlice, ok := userConfig["mysql"].([]interface{}) + mysqlSlice, ok := userConfig["mysql"].([]any) if ok && len(mysqlSlice) > 0 { - mysql, ok := mysqlSlice[0].(map[string]interface{}) + mysql, ok := mysqlSlice[0].(map[string]any) if ok { err = typeupgrader.Map(mysql, map[string]string{ "connect_timeout": "int", @@ -152,9 +152,9 @@ func ResourceMySQLStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "mysql": "bool", @@ -167,9 +167,9 @@ func ResourceMySQLStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && 
len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "mysql": "bool", @@ -182,9 +182,9 @@ func ResourceMySQLStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "mysql": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/opensearch/opensearch.go b/internal/schemautil/userconfig/stateupgrader/v0/opensearch/opensearch.go index 699742cb4..dc80c33f8 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/opensearch/opensearch.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/opensearch/opensearch.go @@ -80,10 +80,10 @@ func ResourceOpenSearch() *schema.Resource { func ResourceOpenSearchStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["opensearch_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["opensearch_user_config"].([]any) if !ok { return rawState, nil } @@ -92,7 +92,7 @@ func ResourceOpenSearchStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -107,10 +107,10 @@ func ResourceOpenSearchStateUpgrade( return rawState, err } - indexPatternsSlice, ok := userConfig["index_patterns"].([]interface{}) + indexPatternsSlice, ok := userConfig["index_patterns"].([]any) if ok && len(indexPatternsSlice) > 0 { 
for _, v := range indexPatternsSlice { - indexPattern, ok := v.(map[string]interface{}) + indexPattern, ok := v.(map[string]any) if !ok { continue } @@ -124,9 +124,9 @@ func ResourceOpenSearchStateUpgrade( } } - indexTemplateSlice, ok := userConfig["index_template"].([]interface{}) + indexTemplateSlice, ok := userConfig["index_template"].([]any) if ok && len(indexTemplateSlice) > 0 { - indexTemplate, ok := indexTemplateSlice[0].(map[string]interface{}) + indexTemplate, ok := indexTemplateSlice[0].(map[string]any) if ok { err = typeupgrader.Map(indexTemplate, map[string]string{ "mapping_nested_objects_limit": "int", @@ -139,9 +139,9 @@ func ResourceOpenSearchStateUpgrade( } } - opensearchSlice, ok := userConfig["opensearch"].([]interface{}) + opensearchSlice, ok := userConfig["opensearch"].([]any) if ok && len(opensearchSlice) > 0 { - opensearch, ok := opensearchSlice[0].(map[string]interface{}) + opensearch, ok := opensearchSlice[0].(map[string]any) if ok { err = typeupgrader.Map(opensearch, map[string]string{ "action_auto_create_index_enabled": "bool", @@ -177,9 +177,9 @@ func ResourceOpenSearchStateUpgrade( } } - opensearchDashboardsSlice, ok := userConfig["opensearch_dashboards"].([]interface{}) + opensearchDashboardsSlice, ok := userConfig["opensearch_dashboards"].([]any) if ok && len(opensearchDashboardsSlice) > 0 { - opensearchDashboards, ok := opensearchDashboardsSlice[0].(map[string]interface{}) + opensearchDashboards, ok := opensearchDashboardsSlice[0].(map[string]any) if ok { err = typeupgrader.Map(opensearchDashboards, map[string]string{ "enabled": "bool", @@ -192,9 +192,9 @@ func ResourceOpenSearchStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = 
typeupgrader.Map(privateAccess, map[string]string{ "opensearch": "bool", @@ -207,9 +207,9 @@ func ResourceOpenSearchStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "opensearch": "bool", @@ -222,9 +222,9 @@ func ResourceOpenSearchStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "opensearch": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/pg/pg.go b/internal/schemautil/userconfig/stateupgrader/v0/pg/pg.go index 57bb5afb5..f824b7827 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/pg/pg.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/pg/pg.go @@ -125,10 +125,10 @@ func ResourcePG() *schema.Resource { func ResourcePGStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := rawState["pg_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["pg_user_config"].([]any) if !ok { return rawState, nil } @@ -137,7 +137,7 @@ func ResourcePGStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -156,9 +156,9 @@ func 
ResourcePGStateUpgrade( return rawState, err } - migrationSlice, ok := userConfig["migration"].([]interface{}) + migrationSlice, ok := userConfig["migration"].([]any) if ok && len(migrationSlice) > 0 { - migration, ok := migrationSlice[0].(map[string]interface{}) + migration, ok := migrationSlice[0].(map[string]any) if ok { err := typeupgrader.Map(migration, map[string]string{ "port": "int", @@ -170,9 +170,9 @@ func ResourcePGStateUpgrade( } } - pgSlice, ok := userConfig["pg"].([]interface{}) + pgSlice, ok := userConfig["pg"].([]any) if ok && len(pgSlice) > 0 { - pg, ok := pgSlice[0].(map[string]interface{}) + pg, ok := pgSlice[0].(map[string]any) if ok { err := typeupgrader.Map(pg, map[string]string{ "autovacuum_analyze_scale_factor": "float", @@ -220,9 +220,9 @@ func ResourcePGStateUpgrade( } } - pgBouncerSlice, ok := userConfig["pgbouncer"].([]interface{}) + pgBouncerSlice, ok := userConfig["pgbouncer"].([]any) if ok && len(pgBouncerSlice) > 0 { - pgBouncer, ok := pgBouncerSlice[0].(map[string]interface{}) + pgBouncer, ok := pgBouncerSlice[0].(map[string]any) if ok { err := typeupgrader.Map(pgBouncer, map[string]string{ "autodb_idle_timeout": "int", @@ -239,9 +239,9 @@ func ResourcePGStateUpgrade( } } - pgLookoutSlice, ok := userConfig["pglookout"].([]interface{}) + pgLookoutSlice, ok := userConfig["pglookout"].([]any) if ok && len(pgLookoutSlice) > 0 { - pgLookout, ok := pgLookoutSlice[0].(map[string]interface{}) + pgLookout, ok := pgLookoutSlice[0].(map[string]any) if ok { err := typeupgrader.Map(pgLookout, map[string]string{ "max_failover_replication_time_lag": "int", @@ -252,9 +252,9 @@ func ResourcePGStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = 
typeupgrader.Map(privateAccess, map[string]string{ "pg": "bool", @@ -267,9 +267,9 @@ func ResourcePGStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "pg": "bool", @@ -282,9 +282,9 @@ func ResourcePGStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "pg": "bool", @@ -297,9 +297,9 @@ func ResourcePGStateUpgrade( } } - timescaleDBSlice, ok := userConfig["timescaledb"].([]interface{}) + timescaleDBSlice, ok := userConfig["timescaledb"].([]any) if ok && len(timescaleDBSlice) > 0 { - timescaleDB, ok := timescaleDBSlice[0].(map[string]interface{}) + timescaleDB, ok := timescaleDBSlice[0].(map[string]any) if ok { err := typeupgrader.Map(timescaleDB, map[string]string{ "max_background_workers": "int", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/redis/redis.go b/internal/schemautil/userconfig/stateupgrader/v0/redis/redis.go index d7e2ef8f2..f7d08c713 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/redis/redis.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/redis/redis.go @@ -73,10 +73,10 @@ func ResourceRedis() *schema.Resource { func ResourceRedisStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { - userConfigSlice, ok := 
rawState["redis_user_config"].([]interface{}) + rawState map[string]any, + _ any, +) (map[string]any, error) { + userConfigSlice, ok := rawState["redis_user_config"].([]any) if !ok { return rawState, nil } @@ -85,7 +85,7 @@ func ResourceRedisStateUpgrade( return rawState, nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return rawState, nil } @@ -104,9 +104,9 @@ func ResourceRedisStateUpgrade( return rawState, err } - migrationSlice, ok := userConfig["migration"].([]interface{}) + migrationSlice, ok := userConfig["migration"].([]any) if ok && len(migrationSlice) > 0 { - migration, ok := migrationSlice[0].(map[string]interface{}) + migration, ok := migrationSlice[0].(map[string]any) if ok { err := typeupgrader.Map(migration, map[string]string{ "port": "int", @@ -118,9 +118,9 @@ func ResourceRedisStateUpgrade( } } - privateAccessSlice, ok := userConfig["private_access"].([]interface{}) + privateAccessSlice, ok := userConfig["private_access"].([]any) if ok && len(privateAccessSlice) > 0 { - privateAccess, ok := privateAccessSlice[0].(map[string]interface{}) + privateAccess, ok := privateAccessSlice[0].(map[string]any) if ok { err = typeupgrader.Map(privateAccess, map[string]string{ "prometheus": "bool", @@ -132,9 +132,9 @@ func ResourceRedisStateUpgrade( } } - privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]interface{}) + privateLinkAccessSlice, ok := userConfig["privatelink_access"].([]any) if ok && len(privateLinkAccessSlice) > 0 { - privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]interface{}) + privateLinkAccess, ok := privateLinkAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(privateLinkAccess, map[string]string{ "prometheus": "bool", @@ -146,9 +146,9 @@ func ResourceRedisStateUpgrade( } } - publicAccessSlice, ok := userConfig["public_access"].([]interface{}) + publicAccessSlice, ok := userConfig["public_access"].([]any) if ok && 
len(publicAccessSlice) > 0 { - publicAccess, ok := publicAccessSlice[0].(map[string]interface{}) + publicAccess, ok := publicAccessSlice[0].(map[string]any) if ok { err := typeupgrader.Map(publicAccess, map[string]string{ "prometheus": "bool", diff --git a/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration.go b/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration.go index 2de05bb8b..17009db28 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration.go @@ -87,9 +87,9 @@ func ResourceServiceIntegration() *schema.Resource { func ResourceServiceIntegrationStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { + rawState map[string]any, + _ any, +) (map[string]any, error) { err := logsStateUpgrade(rawState) if err != nil { return rawState, err @@ -113,8 +113,8 @@ func ResourceServiceIntegrationStateUpgrade( return rawState, nil } -func logsStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["logs_user_config"].([]interface{}) +func logsStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["logs_user_config"].([]any) if !ok { return nil } @@ -123,7 +123,7 @@ func logsStateUpgrade(rawState map[string]interface{}) error { return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } @@ -138,8 +138,8 @@ func logsStateUpgrade(rawState map[string]interface{}) error { return nil } -func kafkaMirrormakerStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["kafka_mirrormaker_user_config"].([]interface{}) +func kafkaMirrormakerStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := 
rawState["kafka_mirrormaker_user_config"].([]any) if !ok { return nil } @@ -148,14 +148,14 @@ func kafkaMirrormakerStateUpgrade(rawState map[string]interface{}) error { return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } - kafkaMirrormakerSlice, ok := userConfig["kafka_mirrormaker"].([]interface{}) + kafkaMirrormakerSlice, ok := userConfig["kafka_mirrormaker"].([]any) if ok && len(kafkaMirrormakerSlice) > 0 { - kafkaMirrormaker, ok := kafkaMirrormakerSlice[0].(map[string]interface{}) + kafkaMirrormaker, ok := kafkaMirrormakerSlice[0].(map[string]any) if !ok { return nil } @@ -175,8 +175,8 @@ func kafkaMirrormakerStateUpgrade(rawState map[string]interface{}) error { return nil } -func metricsStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["metrics_user_config"].([]interface{}) +func metricsStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["metrics_user_config"].([]any) if !ok { return nil } @@ -185,7 +185,7 @@ func metricsStateUpgrade(rawState map[string]interface{}) error { return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } @@ -197,13 +197,13 @@ func metricsStateUpgrade(rawState map[string]interface{}) error { return err } - sourceMySQLSlice, ok := userConfig["source_mysql"].([]interface{}) + sourceMySQLSlice, ok := userConfig["source_mysql"].([]any) if ok && len(sourceMySQLSlice) > 0 { - sourceMySQL, ok := sourceMySQLSlice[0].(map[string]interface{}) + sourceMySQL, ok := sourceMySQLSlice[0].(map[string]any) if ok { - telegrafSlice, ok := sourceMySQL["telegraf"].([]interface{}) + telegrafSlice, ok := sourceMySQL["telegraf"].([]any) if ok && len(telegrafSlice) > 0 { - telegraf, ok := telegrafSlice[0].(map[string]interface{}) + telegraf, ok := telegrafSlice[0].(map[string]any) if ok { err := 
typeupgrader.Map(telegraf, map[string]string{ "gather_event_waits": "bool", @@ -232,8 +232,8 @@ func metricsStateUpgrade(rawState map[string]interface{}) error { return nil } -func serviceIntegrationDatadogStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["datadog_user_config"].([]interface{}) +func serviceIntegrationDatadogStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["datadog_user_config"].([]any) if !ok { return nil } @@ -242,7 +242,7 @@ func serviceIntegrationDatadogStateUpgrade(rawState map[string]interface{}) erro return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } diff --git a/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration_endpoint.go b/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration_endpoint.go index d999ed623..fa8a08425 100644 --- a/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration_endpoint.go +++ b/internal/schemautil/userconfig/stateupgrader/v0/serviceintegration/service_integration_endpoint.go @@ -60,9 +60,9 @@ func ResourceServiceIntegrationEndpoint() *schema.Resource { func ResourceServiceIntegrationEndpointStateUpgrade( _ context.Context, - rawState map[string]interface{}, - _ interface{}, -) (map[string]interface{}, error) { + rawState map[string]any, + _ any, +) (map[string]any, error) { err := serviceIntegrationEndpointDatadogStateUpgrade(rawState) if err != nil { return rawState, err @@ -86,8 +86,8 @@ func ResourceServiceIntegrationEndpointStateUpgrade( return rawState, nil } -func serviceIntegrationEndpointDatadogStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["datadog_user_config"].([]interface{}) +func serviceIntegrationEndpointDatadogStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := 
rawState["datadog_user_config"].([]any) if !ok { return nil } @@ -96,7 +96,7 @@ func serviceIntegrationEndpointDatadogStateUpgrade(rawState map[string]interface return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } @@ -114,8 +114,8 @@ func serviceIntegrationEndpointDatadogStateUpgrade(rawState map[string]interface return nil } -func rsyslogStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["rsyslog_user_config"].([]interface{}) +func rsyslogStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["rsyslog_user_config"].([]any) if !ok { return nil } @@ -124,7 +124,7 @@ func rsyslogStateUpgrade(rawState map[string]interface{}) error { return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } @@ -140,8 +140,8 @@ func rsyslogStateUpgrade(rawState map[string]interface{}) error { return nil } -func externalElasticsearchLogsStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["external_elasticsearch_logs_user_config"].([]interface{}) +func externalElasticsearchLogsStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["external_elasticsearch_logs_user_config"].([]any) if !ok { return nil } @@ -150,7 +150,7 @@ func externalElasticsearchLogsStateUpgrade(rawState map[string]interface{}) erro return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } @@ -166,8 +166,8 @@ func externalElasticsearchLogsStateUpgrade(rawState map[string]interface{}) erro return nil } -func externalOpenSearchLogsStateUpgrade(rawState map[string]interface{}) error { - userConfigSlice, ok := rawState["external_opensearch_logs_user_config"].([]interface{}) +func 
externalOpenSearchLogsStateUpgrade(rawState map[string]any) error { + userConfigSlice, ok := rawState["external_opensearch_logs_user_config"].([]any) if !ok { return nil } @@ -176,7 +176,7 @@ func externalOpenSearchLogsStateUpgrade(rawState map[string]interface{}) error { return nil } - userConfig, ok := userConfigSlice[0].(map[string]interface{}) + userConfig, ok := userConfigSlice[0].(map[string]any) if !ok { return nil } diff --git a/internal/schemautil/userconfig/userconfig_test.go b/internal/schemautil/userconfig/userconfig_test.go index 4c8446747..52f61d8af 100644 --- a/internal/schemautil/userconfig/userconfig_test.go +++ b/internal/schemautil/userconfig/userconfig_test.go @@ -14,67 +14,73 @@ import ( "golang.org/x/exp/slices" ) -// generateSchema is a function that generates Terraform schema via its map representation. -func generateSchema(n string, m map[string]interface{}) error { - np := fmt.Sprintf("%ss", n) +// generateSchema generates Terraform schema from a map representation of the schema. 
+func generateSchema(schemaName string, schemaMap map[string]any) error { + schemaNamePlural := fmt.Sprintf("%ss", schemaName) - f := jen.NewFile("dist") + file := jen.NewFile("dist") - f.HeaderComment("Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT.") + file.HeaderComment("Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT.") - smk := maps.Keys(m) - slices.Sort(smk) + sortedMapKeys := maps.Keys(schemaMap) + slices.Sort(sortedMapKeys) - for _, k := range smk { - v := m[k] + for _, key := range sortedMapKeys { + value := schemaMap[key] - kp := strcase.ToGoPascal(k) + keyPascalCase := strcase.ToGoPascal(key) - va, ok := v.(map[string]interface{}) + valueAsserted, ok := value.(map[string]any) if !ok { continue } - pa, ok := va["properties"].(map[string]interface{}) + properties, ok := valueAsserted["properties"].(map[string]any) if !ok { continue } - fn := fmt.Sprintf("%s%s", n, kp) + functionName := fmt.Sprintf("%s%s", schemaName, keyPascalCase) - f.Commentf("%s is a generated function returning the schema of the %s %s.", fn, k, n) + file.Commentf( + "%s is a generated function returning the schema of the %s %s.", functionName, key, schemaName, + ) - req := map[string]struct{}{} + required := map[string]struct{}{} - if sreq, ok := va["required"].([]interface{}); ok { - req = SliceToKeyedMap(sreq) + if requiredSlice, ok := valueAsserted["required"].([]any); ok { + required = SliceToKeyedMap(requiredSlice) } - pm, err := convertPropertiesToSchemaMap(pa, req) + propertiesMap, err := convertPropertiesToSchemaMap(properties, required) if err != nil { return err } - f. + file. Func(). - Id(fn). + Id(functionName). Params(). Id("*schema.Schema"). 
Block( - jen.Id("s").Op(":=").Map(jen.String()).Op("*").Qual(SchemaPackage, "Schema").Values(pm), + jen.Id("s").Op(":=").Map(jen.String()).Op("*").Qual( + SchemaPackage, "Schema", + ).Values(propertiesMap), jen.Line(), jen.Return( jen.Op("&").Qual(SchemaPackage, "Schema").Values(jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, "TypeList"), - jen.Id("Description"): jen.Lit(fmt.Sprintf("%s user configurable settings", kp)), - jen.Id("Elem"): jen.Op("&").Qual(SchemaPackage, "Resource").Values(jen.Dict{ - jen.Id("Schema"): jen.Id("s"), - }), + jen.Id("Type"): jen.Qual(SchemaPackage, "TypeList"), + jen.Id("Description"): jen.Lit(fmt.Sprintf( + "%s user configurable settings", keyPascalCase, + )), + jen.Id("Elem"): jen.Op("&").Qual(SchemaPackage, "Resource"). + Values(jen.Dict{jen.Id("Schema"): jen.Id("s")}), jen.Id("Optional"): jen.Lit(true), - jen.Id("DiffSuppressFunc"): jen. - Qual(SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays").Call(jen.Id("s")), + jen.Id("DiffSuppressFunc"): jen.Qual( + SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays", + ).Call(jen.Id("s")), jen.Id("MaxItems"): jen.Lit(1), }), ), @@ -82,7 +88,7 @@ func generateSchema(n string, m map[string]interface{}) error { Line() } - if err := f.Save(fmt.Sprintf("dist/%s.go", strcase.ToSnake(np))); err != nil { + if err := file.Save(fmt.Sprintf("dist/%s.go", strcase.ToSnake(schemaNamePlural))); err != nil { return err } @@ -91,32 +97,32 @@ func generateSchema(n string, m map[string]interface{}) error { // TestMain is the entry point for the user config schema generator. 
func TestMain(m *testing.M) { - stm, err := representationToMap(ServiceTypes, dist.ServiceTypes) + serviceTypesMap, err := representationToMap(ServiceTypes, dist.ServiceTypes) if err != nil { panic(err) } - err = generateSchema("ServiceType", stm) + err = generateSchema("ServiceType", serviceTypesMap) if err != nil { panic(err) } - itm, err := representationToMap(IntegrationTypes, dist.IntegrationTypes) + integrationTypesMap, err := representationToMap(IntegrationTypes, dist.IntegrationTypes) if err != nil { panic(err) } - err = generateSchema("IntegrationType", itm) + err = generateSchema("IntegrationType", integrationTypesMap) if err != nil { panic(err) } - ietm, err := representationToMap(IntegrationEndpointTypes, dist.IntegrationEndpointTypes) + integrationEndpointTypesMap, err := representationToMap(IntegrationEndpointTypes, dist.IntegrationEndpointTypes) if err != nil { panic(err) } - err = generateSchema("IntegrationEndpointType", ietm) + err = generateSchema("IntegrationEndpointType", integrationEndpointTypesMap) if err != nil { panic(err) } diff --git a/internal/schemautil/userconfig/util.go b/internal/schemautil/userconfig/util.go index 27af11509..730166193 100644 --- a/internal/schemautil/userconfig/util.go +++ b/internal/schemautil/userconfig/util.go @@ -11,103 +11,105 @@ import ( "gopkg.in/yaml.v3" ) -// SchemaType is a custom type that represents a Terraform schema type. +// SchemaType represents a custom type for Terraform schema. type SchemaType int const ( - // ServiceTypes is a constant that represents service schema type. + // ServiceTypes represents the service schema type. ServiceTypes SchemaType = iota - // IntegrationTypes is a constant that represents integration schema type. + // IntegrationTypes represents the integration schema type. IntegrationTypes - // IntegrationEndpointTypes is a constant that represents integration endpoint schema type. + // IntegrationEndpointTypes represents the integration endpoint schema type. 
IntegrationEndpointTypes ) -// cachedRepresentationMaps is a map of cached representation maps. -var cachedRepresentationMaps = make(map[SchemaType]map[string]interface{}, 3) +var ( + // cachedRepresentationMaps is a map of cached representation maps. + cachedRepresentationMaps = make(map[SchemaType]map[string]any, 3) -// cachedRepresentationMapsMutex is a mutex for the cached representation maps. -var cachedRepresentationMapsMutex = sync.Mutex{} + // cachedRepresentationMapsMutex is a mutex for the cached representation maps. + cachedRepresentationMapsMutex = sync.Mutex{} -// typeSuffixRegExp is a regular expression that matches type suffixes. -var typeSuffixRegExp = regexp.MustCompile(`^.*_(boolean|integer|number|string|array|object)$`) + // typeSuffixRegExp is a regular expression that matches type suffixes. + typeSuffixRegExp = regexp.MustCompile(`^.*_(boolean|integer|number|string|array|object)$`) +) -// CachedRepresentationMap is a function that returns a cached representation map. -func CachedRepresentationMap(st SchemaType) (map[string]interface{}, error) { +// CachedRepresentationMap returns a cached representation map for a given schema type. 
+func CachedRepresentationMap(schemaType SchemaType) (map[string]any, error) { if _, ok := map[SchemaType]struct{}{ ServiceTypes: {}, IntegrationTypes: {}, IntegrationEndpointTypes: {}, - }[st]; !ok { - return nil, fmt.Errorf("unknown schema type: %d", st) + }[schemaType]; !ok { + return nil, fmt.Errorf("unknown schema type: %d", schemaType) } - switch st { + switch schemaType { case ServiceTypes: - return representationToMap(st, dist.ServiceTypes) + return representationToMap(schemaType, dist.ServiceTypes) case IntegrationTypes: - return representationToMap(st, dist.IntegrationTypes) + return representationToMap(schemaType, dist.IntegrationTypes) case IntegrationEndpointTypes: - return representationToMap(st, dist.IntegrationEndpointTypes) + return representationToMap(schemaType, dist.IntegrationEndpointTypes) default: - return nil, fmt.Errorf("unknown schema type %d", st) + return nil, fmt.Errorf("unknown schema type %d", schemaType) } } // representationToMap converts a YAML representation of a Terraform schema to a map. -func representationToMap(st SchemaType, r []byte) (map[string]interface{}, error) { +func representationToMap(schemaType SchemaType, representation []byte) (map[string]any, error) { cachedRepresentationMapsMutex.Lock() defer cachedRepresentationMapsMutex.Unlock() - if v, ok := cachedRepresentationMaps[st]; ok { - return v, nil + if cachedMap, ok := cachedRepresentationMaps[schemaType]; ok { + return cachedMap, nil } - var m map[string]interface{} - if err := yaml.Unmarshal(r, &m); err != nil { + var mapRepresentation map[string]any + if err := yaml.Unmarshal(representation, &mapRepresentation); err != nil { return nil, err } - cachedRepresentationMaps[st] = m - return m, nil + cachedRepresentationMaps[schemaType] = mapRepresentation + return mapRepresentation, nil } -// TerraformTypes is a function that converts schema representation types to Terraform types. 
-func TerraformTypes(t []string) ([]string, []string, error) { - var r, ar []string +// TerraformTypes converts schema representation types to Terraform types. +func TerraformTypes(types []string) ([]string, []string, error) { + var terraformTypes, aivenTypes []string - for _, v := range t { - switch v { + for _, typeValue := range types { + switch typeValue { case "null": - // TODO: We should probably handle this case. - // This is a special case where the value can be null. - // There should be a default value set for this case. + // TODO: Handle this case. + // This is a special case where the value can be null. + // There should be a default value set for this case. continue case "boolean": - r = append(r, "TypeBool") + terraformTypes = append(terraformTypes, "TypeBool") case "integer": - r = append(r, "TypeInt") + terraformTypes = append(terraformTypes, "TypeInt") case "number": - r = append(r, "TypeFloat") + terraformTypes = append(terraformTypes, "TypeFloat") case "string": - r = append(r, "TypeString") + terraformTypes = append(terraformTypes, "TypeString") case "array", "object": - r = append(r, "TypeList") + terraformTypes = append(terraformTypes, "TypeList") default: - return nil, nil, fmt.Errorf("unknown type: %s", v) + return nil, nil, fmt.Errorf("unknown type: %s", typeValue) } - ar = append(ar, v) + aivenTypes = append(aivenTypes, typeValue) } - return r, ar, nil + return terraformTypes, aivenTypes, nil } -// isTerraformTypePrimitive is a function that checks if a Terraform type is a primitive type. -func isTerraformTypePrimitive(t string) bool { - switch t { +// isTerraformTypePrimitive checks if a Terraform type is a primitive type. +func isTerraformTypePrimitive(terraformType string) bool { + switch terraformType { case "TypeBool", "TypeInt", "TypeFloat", "TypeString": return true default: @@ -115,127 +117,135 @@ func isTerraformTypePrimitive(t string) bool { } } -// mustStringSlice is a function that converts an interface to a slice of strings. 
-func mustStringSlice(v interface{}) ([]string, error) { - va, ok := v.([]interface{}) +// mustStringSlice converts an interface to a slice of strings. +func mustStringSlice(value any) ([]string, error) { + valueAsSlice, ok := value.([]any) if !ok { - return nil, fmt.Errorf("not a slice: %#v", v) + return nil, fmt.Errorf("not a slice: %#v", value) } - r := make([]string, len(va)) + stringSlice := make([]string, len(valueAsSlice)) - for k, v := range va { - va, ok := v.(string) + for index, value := range valueAsSlice { + stringValue, ok := value.(string) if !ok { - return nil, fmt.Errorf("value is not a string: %#v", v) + return nil, fmt.Errorf("value is not a string: %#v", value) } - r[k] = va + stringSlice[index] = stringValue } - return r, nil + return stringSlice, nil } -// SlicedString is a function that accepts a string or a slice of strings and returns a slice of strings. -func SlicedString(v interface{}) []string { - va, ok := v.([]interface{}) +// SlicedString accepts a string or a slice of strings and returns a slice of strings. +func SlicedString(value any) []string { + valueAsSlice, ok := value.([]any) if ok { - vas, err := mustStringSlice(va) + stringSlice, err := mustStringSlice(valueAsSlice) if err != nil { panic(err) } - return vas + return stringSlice } - vsa, ok := v.(string) + valueAsString, ok := value.(string) if !ok { - panic(fmt.Sprintf("value is not a string or a slice of strings: %#v", v)) + panic(fmt.Sprintf("value is not a string or a slice of strings: %#v", value)) } - return []string{vsa} + return []string{valueAsString} } -// constDescriptionReplaceables is a slice of strings that are replaced in descriptions. +// constDescriptionReplaceables is a map of strings that are replaced in descriptions. var constDescriptionReplaceables = map[string]string{ "DEPRECATED: ": "", "This setting is deprecated. ": "", "[seconds]": "(seconds)", } -// descriptionForProperty is a function that returns the description for a property. 
-func descriptionForProperty(p map[string]interface{}, t string) (id bool, d string) { - if da, ok := p["description"].(string); ok { - d = da +// descriptionForProperty returns the description for a property. +func descriptionForProperty( + property map[string]any, + terraformType string, +) (isDeprecated bool, description string) { + if descriptionValue, ok := property["description"].(string); ok { + description = descriptionValue } else { - d = p["title"].(string) + description = property["title"].(string) } - if strings.Contains(strings.ToLower(d), "deprecated") { - id = true - } + isDeprecated = strings.Contains(strings.ToLower(description), "deprecated") - // sc is short for "should capitalize". - sc := false + // shouldCapitalize is a flag indicating if the first letter should be capitalized. + shouldCapitalize := false // Some descriptions have a built-in deprecation notice, so we need to remove it. - for k, v := range constDescriptionReplaceables { - pd := d + for old, new := range constDescriptionReplaceables { + previousDescription := description - d = strings.ReplaceAll(d, k, v) + description = strings.ReplaceAll(description, old, new) - if pd != d { - sc = true + if previousDescription != description { + shouldCapitalize = true } } - b := Desc(d) + descriptionBuilder := Desc(description) - if sc { - b = b.ForceFirstLetterCapitalization() + if shouldCapitalize { + descriptionBuilder = descriptionBuilder.ForceFirstLetterCapitalization() } - if def, ok := p["default"]; ok && isTerraformTypePrimitive(t) { - skip := false + if defaultValue, ok := property["default"]; ok && isTerraformTypePrimitive(terraformType) { + skipDefaultValue := false - if adef, ok := def.(string); ok { - if adef == "" { - skip = true + if defaultValueAsString, ok := defaultValue.(string); ok { + if defaultValueAsString == "" { + skipDefaultValue = true } } - if !skip { - b = b.DefaultValue(def) + if !skipDefaultValue { + descriptionBuilder = 
descriptionBuilder.DefaultValue(defaultValue) } } - d = b.Build() + description = descriptionBuilder.Build() - return id, d + return isDeprecated, description } -// EncodeKey is a function that encodes a key for a Terraform schema. -func EncodeKey(k string) string { - return strings.ReplaceAll(k, ".", "__dot__") +// EncodeKey encodes a key for a Terraform schema. +func EncodeKey(key string) string { + return strings.ReplaceAll(key, ".", "__dot__") } -// DecodeKey is a function that decodes a key for a Terraform schema. -func DecodeKey(k string) string { - return strings.ReplaceAll(k, "__dot__", ".") +// DecodeKey decodes a key for a Terraform schema. +func DecodeKey(key string) string { + return strings.ReplaceAll(key, "__dot__", ".") } -// IsKeyTyped is a function that checks if a key is typed, i.e. has a type suffix in it. -func IsKeyTyped(k string) bool { - return typeSuffixRegExp.MatchString(k) +// IsKeyTyped checks if a key is typed, i.e., has a type suffix in it. +func IsKeyTyped(key string) bool { + return typeSuffixRegExp.MatchString(key) } -// SliceToKeyedMap is a function that converts a slice of strings to a map. -func SliceToKeyedMap(s []interface{}) map[string]struct{} { - r := make(map[string]struct{}) +// SliceToKeyedMap converts a slice of any type to a map with keys of type string. +// It expects that all elements in the slice are of type string, otherwise it will panic. +// The values in the map are of type struct{} to minimize memory usage, as we are only interested in the keys. +func SliceToKeyedMap(slice []any) map[string]struct{} { + // Initialize an empty map with string keys and struct{} values. + result := make(map[string]struct{}) - for _, v := range s { - r[v.(string)] = struct{}{} + // Iterate through each element in the slice. + for _, value := range slice { + // Assert that the element is of type string, and then use it as a key in the map. + // The value associated with each key is an empty struct{}. 
+ result[value.(string)] = struct{}{} } - return r + // Return the resulting map. + return result } diff --git a/internal/sdkprovider/service/cassandra/cassandra.go b/internal/sdkprovider/service/cassandra/cassandra.go index e9bfb72ed..b3fbd4275 100644 --- a/internal/sdkprovider/service/cassandra/cassandra.go +++ b/internal/sdkprovider/service/cassandra/cassandra.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func cassandraSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func cassandraSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeCassandra+"_user_config"] = dist.ServiceTypeCassandra() + s[schemautil.ServiceTypeCassandra+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeCassandra) return s } @@ -33,7 +33,6 @@ func ResourceCassandra() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeCassandra), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/clickhouse/clickhouse.go b/internal/sdkprovider/service/clickhouse/clickhouse.go index 51c58a636..61142818f 100644 --- a/internal/sdkprovider/service/clickhouse/clickhouse.go +++ b/internal/sdkprovider/service/clickhouse/clickhouse.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - 
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func clickhouseSchema() map[string]*schema.Schema { @@ -18,7 +18,7 @@ func clickhouseSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeClickhouse+"_user_config"] = dist.ServiceTypeClickhouse() + s[schemautil.ServiceTypeClickhouse+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeClickhouse) s["service_integrations"] = &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -51,7 +51,6 @@ func ResourceClickhouse() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeClickhouse), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/flink/flink.go b/internal/sdkprovider/service/flink/flink.go index 9a14c5f56..730ce6b15 100644 --- a/internal/sdkprovider/service/flink/flink.go +++ b/internal/sdkprovider/service/flink/flink.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenFlinkSchema() map[string]*schema.Schema { @@ -31,7 +31,7 @@ func aivenFlinkSchema() map[string]*schema.Schema { }, }, } - aivenFlinkSchema[schemautil.ServiceTypeFlink+"_user_config"] = dist.ServiceTypeFlink() + aivenFlinkSchema[schemautil.ServiceTypeFlink+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeFlink) return aivenFlinkSchema } @@ 
-45,7 +45,6 @@ func ResourceFlink() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeFlink), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/grafana/grafana.go b/internal/sdkprovider/service/grafana/grafana.go index b4d63a5d8..557b077bb 100644 --- a/internal/sdkprovider/service/grafana/grafana.go +++ b/internal/sdkprovider/service/grafana/grafana.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func grafanaSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func grafanaSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeGrafana+"_user_config"] = dist.ServiceTypeGrafana() + s[schemautil.ServiceTypeGrafana+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeGrafana) return s } @@ -32,7 +32,6 @@ func ResourceGrafana() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeGrafana), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/grafana/grafana_test.go b/internal/sdkprovider/service/grafana/grafana_test.go index caccea153..aafd6692b 100644 --- a/internal/sdkprovider/service/grafana/grafana_test.go +++ 
b/internal/sdkprovider/service/grafana/grafana_test.go @@ -499,3 +499,65 @@ resource "aiven_grafana" "grafana" { `, prefix, project, ipFilterObjs) } + +// TestAccAiven_grafana_set_change tests that changing a set actually changes its count. +// This is a test that the diff suppressor doesn't suppress a set's items. +func TestAccAiven_grafana_set_change(t *testing.T) { + resourceName := "aiven_grafana.bar" + rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + ProtoV6ProviderFactories: acc.TestProtoV6ProviderFactories, + CheckDestroy: acc.TestAccCheckAivenServiceResourceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGrafanaResourceSetChange(rName, "100, 101, 111"), + Check: resource.ComposeTestCheckFunc( + acc.TestAccCheckAivenServiceCommonAttributes("data.aiven_grafana.common"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.client_id", "my_client_id"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.client_secret", "my_client_secret"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.team_ids.#", "3"), + ), + }, + { + Config: testAccGrafanaResourceSetChange(rName, "111"), + Check: resource.ComposeTestCheckFunc( + acc.TestAccCheckAivenServiceCommonAttributes("data.aiven_grafana.common"), + resource.TestCheckResourceAttr(resourceName, "grafana_user_config.0.auth_github.0.team_ids.#", "1"), + ), + }, + }, + }) +} + +func testAccGrafanaResourceSetChange(name, teamIDs string) string { + return fmt.Sprintf(` +data "aiven_project" "foo" { + project = "%s" +} + +resource "aiven_grafana" "bar" { + project = data.aiven_project.foo.project + cloud_name = "google-europe-west1" + plan = "startup-1" + service_name = "test-acc-sr-%s" + maintenance_window_dow = "monday" + maintenance_window_time = "10:00:00" + + grafana_user_config { + auth_github { 
client_id = "my_client_id" + client_secret = "my_client_secret" + team_ids = [%s] + } + } +} + +data "aiven_grafana" "common" { + service_name = aiven_grafana.bar.service_name + project = data.aiven_project.foo.project + + depends_on = [aiven_grafana.bar] +}`, os.Getenv("AIVEN_PROJECT_NAME"), name, teamIDs) +} diff --git a/internal/sdkprovider/service/influxdb/influxdb.go b/internal/sdkprovider/service/influxdb/influxdb.go index 070153592..ac7263a0c 100644 --- a/internal/sdkprovider/service/influxdb/influxdb.go +++ b/internal/sdkprovider/service/influxdb/influxdb.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func influxDBSchema() map[string]*schema.Schema { @@ -25,7 +25,7 @@ func influxDBSchema() map[string]*schema.Schema { }, }, } - s[schemautil.ServiceTypeInfluxDB+"_user_config"] = dist.ServiceTypeInfluxdb() + s[schemautil.ServiceTypeInfluxDB+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeInfluxDB) return s } @@ -39,7 +39,6 @@ func ResourceInfluxDB() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeInfluxDB), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/kafka/kafka.go b/internal/sdkprovider/service/kafka/kafka.go index 094efcb0d..c10e6ff92 100644 --- a/internal/sdkprovider/service/kafka/kafka.go +++ b/internal/sdkprovider/service/kafka/kafka.go @@ -10,8 +10,8 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaSchema() map[string]*schema.Schema { @@ -64,13 +64,13 @@ func aivenKafkaSchema() map[string]*schema.Schema { "schema_registry_uri": { Type: schema.TypeString, Computed: true, - Description: "The Schema Registry URI, if any", + Description: "The Schema Registry URI, if any", Sensitive: true, }, }, }, } - aivenKafkaSchema[schemautil.ServiceTypeKafka+"_user_config"] = dist.ServiceTypeKafka() + aivenKafkaSchema[schemautil.ServiceTypeKafka+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafka) return aivenKafkaSchema } @@ -90,7 +90,6 @@ func ResourceKafka() *schema.Resource { Schema: aivenKafkaSchema(), CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeKafka), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, @@ -144,7 +143,7 @@ func resourceKafkaCreate(ctx context.Context, d *schema.ResourceData, m interfac return di } - // if default_acl=false delete default wildcard Kafka ACL and ACLs for Schema Registry that are automatically created + // if default_acl=false delete default wildcard Kafka ACL and ACLs for Schema Registry that are automatically created if !d.Get("default_acl").(bool) { client := m.(*aiven.Client) project := d.Get("project").(string) @@ -166,7 +165,7 @@ func resourceKafkaCreate(ctx context.Context, d *schema.ResourceData, m interfac } for _, acl := range defaultSchemaACLLs { if err := client.KafkaSchemaRegistryACLs.Delete(ctx, project, serviceName, acl); err != nil && 
!aiven.IsNotFound(err) { - return diag.Errorf("cannot delete `%s` kafka ACL for Schema Registry: %s", acl, err) + return diag.Errorf("cannot delete `%s` kafka ACL for Schema Registry: %s", acl, err) } } } @@ -212,7 +211,7 @@ func resourceKafkaRead(ctx context.Context, d *schema.ResourceData, m interface{ (kafkaRest && !kafka.Features.KafkaRest)) { diags = append(diags, diag.Diagnostic{ Severity: diag.Warning, - Summary: "You are using Confluent Schema Registry v5.0 that is no longer supported " + + Summary: "You are using Confluent Schema Registry v5.0 that is no longer supported " + "on Kafka v3.0. Please switch to Karapace, a drop-in open source replacement " + "before proceeding with the upgrade. To do that use aiven_kafka.karapace=true " + "that will switch the service to use Karapace for schema registry and REST proxy. " + diff --git a/internal/sdkprovider/service/kafka/kafka_connect.go b/internal/sdkprovider/service/kafka/kafka_connect.go index 7b7b8af9c..7f7700ddb 100644 --- a/internal/sdkprovider/service/kafka/kafka_connect.go +++ b/internal/sdkprovider/service/kafka/kafka_connect.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaConnectSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenKafkaConnectSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - kafkaConnectSchema[schemautil.ServiceTypeKafkaConnect+"_user_config"] = dist.ServiceTypeKafkaConnect() + kafkaConnectSchema[schemautil.ServiceTypeKafkaConnect+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafkaConnect) return kafkaConnectSchema } @@ -33,7 +33,6 @@ func 
ResourceKafkaConnect() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeKafkaConnect), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("disk_space", schemautil.DiskSpaceShouldNotBeEmpty, schemautil.CustomizeDiffCheckDiskSpace, diff --git a/internal/sdkprovider/service/kafka/kafka_mirrormaker.go b/internal/sdkprovider/service/kafka/kafka_mirrormaker.go index d291c71b6..e76852752 100644 --- a/internal/sdkprovider/service/kafka/kafka_mirrormaker.go +++ b/internal/sdkprovider/service/kafka/kafka_mirrormaker.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenKafkaMirrormakerSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenKafkaMirrormakerSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - kafkaMMSchema[schemautil.ServiceTypeKafkaMirrormaker+"_user_config"] = dist.ServiceTypeKafkaMirrormaker() + kafkaMMSchema[schemautil.ServiceTypeKafkaMirrormaker+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeKafkaMirrormaker) return kafkaMMSchema } @@ -32,7 +32,6 @@ func ResourceKafkaMirrormaker() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeKafkaMirrormaker), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("disk_space", schemautil.DiskSpaceShouldNotBeEmpty, schemautil.CustomizeDiffCheckDiskSpace, diff --git a/internal/sdkprovider/service/kafka/kafka_test.go 
b/internal/sdkprovider/service/kafka/kafka_test.go index 4e85d7810..68699dc20 100644 --- a/internal/sdkprovider/service/kafka/kafka_test.go +++ b/internal/sdkprovider/service/kafka/kafka_test.go @@ -343,7 +343,7 @@ func testAccCheckAivenServiceKafkaAttributes(n string) resource.TestCheckFunc { } } -func testAccKafkaResourceUserConfigKafkaNullFieldsOnly(project, prefix string) string { +func testAccKafkaResourceUserConfigKafkaOmitsNullFields(project, prefix string) string { return fmt.Sprintf(` resource "aiven_kafka" "kafka" { project = "%s" @@ -366,7 +366,7 @@ resource "aiven_kafka" "kafka" { `, project, prefix) } -func TestAccAiven_kafka_userconfig_kafka_null_fields_only(t *testing.T) { +func TestAccAiven_kafka_user_config_kafka_omits_null_fields(t *testing.T) { project := os.Getenv("AIVEN_PROJECT_NAME") prefix := "test-tf-acc-" + acctest.RandString(7) resourceName := "aiven_kafka.kafka" @@ -376,13 +376,11 @@ func TestAccAiven_kafka_userconfig_kafka_null_fields_only(t *testing.T) { CheckDestroy: acc.TestAccCheckAivenServiceResourceDestroy, Steps: []resource.TestStep{ { - Config: testAccKafkaResourceUserConfigKafkaNullFieldsOnly(project, prefix), + Config: testAccKafkaResourceUserConfigKafkaOmitsNullFields(project, prefix), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "kafka_user_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.0.group_max_session_timeout_ms", "0"), - resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.0.log_retention_bytes", "0"), + resource.TestCheckResourceAttr(resourceName, "kafka_user_config.0.kafka.#", "0"), ), }, }, diff --git a/internal/sdkprovider/service/m3db/m3aggregator.go b/internal/sdkprovider/service/m3db/m3aggregator.go index a5c2cabd0..5593bf34b 100644 --- 
a/internal/sdkprovider/service/m3db/m3aggregator.go +++ b/internal/sdkprovider/service/m3db/m3aggregator.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenM3AggregatorSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenM3AggregatorSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaM3[schemautil.ServiceTypeM3Aggregator+"_user_config"] = dist.ServiceTypeM3aggregator() + schemaM3[schemautil.ServiceTypeM3Aggregator+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeM3Aggregator) return schemaM3 } @@ -32,7 +32,6 @@ func ResourceM3Aggregator() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeM3Aggregator), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("disk_space", schemautil.DiskSpaceShouldNotBeEmpty, schemautil.CustomizeDiffCheckDiskSpace, diff --git a/internal/sdkprovider/service/m3db/m3db.go b/internal/sdkprovider/service/m3db/m3db.go index ce3197097..ed5de0d25 100644 --- a/internal/sdkprovider/service/m3db/m3db.go +++ b/internal/sdkprovider/service/m3db/m3db.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenM3DBSchema() 
map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenM3DBSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaM3[schemautil.ServiceTypeM3+"_user_config"] = dist.ServiceTypeM3db() + schemaM3[schemautil.ServiceTypeM3+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeM3) return schemaM3 } @@ -32,7 +32,6 @@ func ResourceM3DB() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeM3), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/mysql/mysql.go b/internal/sdkprovider/service/mysql/mysql.go index 499160330..d63ccb49b 100644 --- a/internal/sdkprovider/service/mysql/mysql.go +++ b/internal/sdkprovider/service/mysql/mysql.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenMySQLSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func aivenMySQLSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - schemaMySQL[schemautil.ServiceTypeMySQL+"_user_config"] = dist.ServiceTypeMysql() + schemaMySQL[schemautil.ServiceTypeMySQL+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeMySQL) return schemaMySQL } @@ -32,7 +32,6 @@ func ResourceMySQL() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeMySQL), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, 
customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/opensearch/opensearch.go b/internal/sdkprovider/service/opensearch/opensearch.go index 6076d8753..1a6197066 100644 --- a/internal/sdkprovider/service/opensearch/opensearch.go +++ b/internal/sdkprovider/service/opensearch/opensearch.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func opensearchSchema() map[string]*schema.Schema { @@ -26,7 +26,7 @@ func opensearchSchema() map[string]*schema.Schema { }, }, } - s[schemautil.ServiceTypeOpenSearch+"_user_config"] = dist.ServiceTypeOpensearch() + s[schemautil.ServiceTypeOpenSearch+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeOpenSearch) return s } @@ -40,7 +40,6 @@ func ResourceOpenSearch() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeOpenSearch), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/service/pg/pg.go b/internal/sdkprovider/service/pg/pg.go index dad48acc5..b412e0153 100644 --- a/internal/sdkprovider/service/pg/pg.go +++ b/internal/sdkprovider/service/pg/pg.go @@ -12,10 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - 
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func aivenPGSchema() map[string]*schema.Schema { @@ -80,7 +78,7 @@ func aivenPGSchema() map[string]*schema.Schema { }, }, } - schemaPG[schemautil.ServiceTypePG+"_user_config"] = dist.ServiceTypePg() + schemaPG[schemautil.ServiceTypePG+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypePG) return schemaPG } @@ -94,7 +92,6 @@ func ResourcePG() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypePG), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, @@ -135,11 +132,10 @@ func resourceServicePGUpdate(ctx context.Context, d *schema.ResourceData, m inte return diag.FromErr(err) } - userConfig, err := apiconvert.ToAPI(userconfig.ServiceTypes, "pg", d) + userConfig, err := schemautil.ExpandService(schemautil.ServiceTypePG, d) if err != nil { return diag.FromErr(err) } - if userConfig["pg_version"] != nil { s, err := client.Services.Get(ctx, projectName, serviceName) if err != nil { diff --git a/internal/sdkprovider/service/redis/redis.go b/internal/sdkprovider/service/redis/redis.go index 3d6b8f71c..93cd50d4a 100644 --- a/internal/sdkprovider/service/redis/redis.go +++ b/internal/sdkprovider/service/redis/redis.go @@ -5,8 +5,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" 
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" + "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/service" ) func redisSchema() map[string]*schema.Schema { @@ -19,7 +19,7 @@ func redisSchema() map[string]*schema.Schema { Schema: map[string]*schema.Schema{}, }, } - s[schemautil.ServiceTypeRedis+"_user_config"] = dist.ServiceTypeRedis() + s[schemautil.ServiceTypeRedis+"_user_config"] = service.GetUserConfig(schemautil.ServiceTypeRedis) return s } @@ -33,7 +33,6 @@ func ResourceRedis() *schema.Resource { DeleteContext: schemautil.ResourceServiceDelete, CustomizeDiff: customdiff.Sequence( schemautil.SetServiceTypeIfEmpty(schemautil.ServiceTypeRedis), - schemautil.CustomizeDiffDisallowMultipleManyToOneKeys, customdiff.IfValueChange("tag", schemautil.TagsShouldNotBeEmpty, schemautil.CustomizeDiffCheckUniqueTag, diff --git a/internal/sdkprovider/userconfig/service/cassandra.go b/internal/sdkprovider/userconfig/service/cassandra.go new file mode 100644 index 000000000..8ceb4d918 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/cassandra.go @@ -0,0 +1,159 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaCassandra() *schema.Schema { + return &schema.Schema{ + Description: "Cassandra user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. 
New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "cassandra": { + Description: "cassandra configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "batch_size_fail_threshold_in_kb": { + Description: "Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.", + Optional: true, + Type: schema.TypeInt, + }, + "batch_size_warn_threshold_in_kb": { + Description: "Log a warning message on any multiple-partition batch size exceeding this value.5kb per batch by default.Caution should be taken on increasing the size of this thresholdas it can lead to node instability.", + Optional: true, + Type: schema.TypeInt, + }, + "datacenter": { + Description: "Name of the datacenter to which nodes of this service belong. Can be set only when creating the service.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "cassandra_version": { + Description: "Cassandra major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"4", "3"}, false), + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migrate_sstableloader": { + Description: "Sets the service into migration mode enabling the sstableloader utility to be used to upload Cassandra data files. Available only on service create.", + Optional: true, + Type: schema.TypeBool, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_join_with": { + Description: "When bootstrapping, instead of creating a new Cassandra cluster try to join an existing one from another service. Can only be set on service creation.", + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/clickhouse.go b/internal/sdkprovider/userconfig/service/clickhouse.go new file mode 100644 index 000000000..3a8f7e7d5 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/clickhouse.go @@ -0,0 +1,154 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func schemaClickhouse() *schema.Schema { + return &schema.Schema{ + Description: "Clickhouse user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Enable clickhouse.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Enable clickhouse_https.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "clickhouse": { + Description: "Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "clickhouse_https": { + Description: "Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/flink.go b/internal/sdkprovider/userconfig/service/flink.go new file mode 100644 index 000000000..a04c68818 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/flink.go @@ -0,0 +1,90 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaFlink() *schema.Schema { + return &schema.Schema{ + Description: "Flink user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "flink_version": { + Description: "Flink major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.16"}, false), + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "number_of_task_slots": { + Description: "Task slots per node. 
For a 3 node plan, total number of task slots is 3x this value.", + Optional: true, + Type: schema.TypeInt, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "flink": { + Description: "Enable flink.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/grafana.go b/internal/sdkprovider/userconfig/service/grafana.go new file mode 100644 index 000000000..a4f9d9705 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/grafana.go @@ -0,0 +1,617 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaGrafana() *schema.Schema { + return &schema.Schema{ + Description: "Grafana user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "alerting_enabled": { + Description: "Enable or disable Grafana alerting functionality.", + Optional: true, + Type: schema.TypeBool, + }, + "alerting_error_or_timeout": { + Description: "Default error or timeout setting for new alerting rules.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alerting", "keep_state"}, false), + }, + "alerting_max_annotations_to_keep": { 
+ Description: "Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.", + Optional: true, + Type: schema.TypeInt, + }, + "alerting_nodata_or_nullvalues": { + Description: "Default value for 'no data or null values' for new alerting rules.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alerting", "no_data", "keep_state", "ok"}, false), + }, + "allow_embedding": { + Description: "Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.", + Optional: true, + Type: schema.TypeBool, + }, + "auth_azuread": { + Description: "Azure AD OAuth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Allowed domains.", + Elem: &schema.Schema{ + Description: "Allowed domain.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "allowed_groups": { + Description: "Require users to belong to one of given groups.", + Elem: &schema.Schema{ + Description: "Group Object ID from Azure AD.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "auth_url": { + Description: "Authorization URL.", + Required: true, + Type: schema.TypeString, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "token_url": { + Description: "Token URL.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_basic_enabled": { + Description: "Enable or disable basic authentication form, used by Grafana built-in login.", + 
Optional: true, + Type: schema.TypeBool, + }, + "auth_generic_oauth": { + Description: "Generic OAuth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Allowed domains.", + Elem: &schema.Schema{ + Description: "Allowed domain.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "allowed_organizations": { + Description: "Require user to be member of one of the listed organizations.", + Elem: &schema.Schema{ + Description: "Allowed organization.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "api_url": { + Description: "API URL.", + Required: true, + Type: schema.TypeString, + }, + "auth_url": { + Description: "Authorization URL.", + Required: true, + Type: schema.TypeString, + }, + "auto_login": { + Description: "Allow users to bypass the login screen and automatically log in.", + Optional: true, + Type: schema.TypeBool, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "name": { + Description: "Name of the OAuth integration.", + Optional: true, + Type: schema.TypeString, + }, + "scopes": { + Description: "OAuth scopes.", + Elem: &schema.Schema{ + Description: "OAuth scope.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "token_url": { + Description: "Token URL.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_github": { + Description: "Github Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: 
"Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_organizations": { + Description: "Require users to belong to one of given organizations.", + Elem: &schema.Schema{ + Description: "Organization name.", + Type: schema.TypeString, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "team_ids": { + Description: "Require users to belong to one of given team IDs.", + Elem: &schema.Schema{ + Description: "Team ID.", + Type: schema.TypeInt, + }, + MaxItems: 50, + Optional: true, + Type: schema.TypeSet, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_gitlab": { + Description: "GitLab Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_groups": { + Description: "Require users to belong to one of given groups.", + Elem: &schema.Schema{ + Description: "Group or subgroup name.", + Type: schema.TypeString, + }, + MaxItems: 50, + Required: true, + Type: schema.TypeSet, + }, + "api_url": { + Description: "API URL. This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + "auth_url": { + Description: "Authorization URL. This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + "token_url": { + Description: "Token URL. 
This only needs to be set when using self hosted GitLab.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "auth_google": { + Description: "Google Auth integration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_sign_up": { + Description: "Automatically sign-up users on successful sign-in.", + Optional: true, + Type: schema.TypeBool, + }, + "allowed_domains": { + Description: "Domains allowed to sign-in to this Grafana.", + Elem: &schema.Schema{ + Description: "Domain.", + Type: schema.TypeString, + }, + MaxItems: 64, + Required: true, + Type: schema.TypeSet, + }, + "client_id": { + Description: "Client ID from provider.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "Client secret from provider.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "cookie_samesite": { + Description: "Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"lax", "strict", "none"}, false), + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "dashboard_previews_enabled": { + Description: "This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.", + Optional: true, + Type: schema.TypeBool, + }, + "dashboards_min_refresh_interval": { + Description: "Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 
30s, 1h.", + Optional: true, + Type: schema.TypeString, + }, + "dashboards_versions_to_keep": { + Description: "Dashboard versions to keep per dashboard.", + Optional: true, + Type: schema.TypeInt, + }, + "dataproxy_send_user_header": { + Description: "Send 'X-Grafana-User' header to data source.", + Optional: true, + Type: schema.TypeBool, + }, + "dataproxy_timeout": { + Description: "Timeout for data proxy requests in seconds.", + Optional: true, + Type: schema.TypeInt, + }, + "date_formats": { + Description: "Grafana date format specifications", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "default_timezone": { + Description: "Default time zone for user preferences. Value 'browser' uses browser local time zone.", + Optional: true, + Type: schema.TypeString, + }, + "full_date": { + Description: "Moment.js style format string for cases where full date is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_day": { + Description: "Moment.js style format string used when a time requiring day accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_hour": { + Description: "Moment.js style format string used when a time requiring hour accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_minute": { + Description: "Moment.js style format string used when a time requiring minute accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_month": { + Description: "Moment.js style format string used when a time requiring month accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_second": { + Description: "Moment.js style format string used when a time requiring second accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + "interval_year": { + Description: "Moment.js style format string used when a time requiring year accuracy is shown.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + 
Optional: true, + Type: schema.TypeList, + }, + "disable_gravatar": { + Description: "Set to true to disable gravatar. Defaults to false (gravatar is enabled).", + Optional: true, + Type: schema.TypeBool, + }, + "editors_can_admin": { + Description: "Editors can manage folders, teams and dashboards created by them.", + Optional: true, + Type: schema.TypeBool, + }, + "external_image_storage": { + Description: "External image store settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "access_key": { + Description: "S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.", + Required: true, + Type: schema.TypeString, + }, + "bucket_url": { + Description: "Bucket URL for S3.", + Required: true, + Type: schema.TypeString, + }, + "provider": { + Description: "Provider type.", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"s3"}, false), + }, + "secret_key": { + Description: "S3 secret key.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "google_analytics_ua_id": { + Description: "Google Analytics ID.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "metrics_enabled": { + Description: "Enable Grafana /metrics endpoint.", + Optional: true, + Type: schema.TypeBool, + }, + "oauth_allow_insecure_email_lookup": { + Description: "Enforce user lookup based on email instead of the unique ID provided by the IdP.", + Optional: true, + Type: schema.TypeBool, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Allow clients to connect to grafana with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Enable grafana.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { + Description: "Allow clients to connect to grafana from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "smtp_server": { + Description: "SMTP server settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "from_address": { + Description: "Address used for sending emails.", + Required: true, + Type: schema.TypeString, + }, + "from_name": { + Description: "Name used in outgoing emails, defaults to Grafana.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Server hostname or IP.", + Required: true, + Type: schema.TypeString, + }, + "password": { + Description: "Password for SMTP authentication.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "SMTP server port.", + Required: true, + Type: schema.TypeInt, + }, + "skip_verify": { + Description: "Skip verifying server certificate. Defaults to false.", + Optional: true, + Type: schema.TypeBool, + }, + "starttls_policy": { + Description: "Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. 
Default is OpportunisticStartTLS.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"OpportunisticStartTLS", "MandatoryStartTLS", "NoStartTLS"}, false), + }, + "username": { + Description: "Username for SMTP authentication.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + "user_auto_assign_org": { + Description: "Auto-assign new users on signup to main organization. Defaults to false.", + Optional: true, + Type: schema.TypeBool, + }, + "user_auto_assign_org_role": { + Description: "Set role for new signups. Defaults to Viewer.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"Viewer", "Admin", "Editor"}, false), + }, + "viewers_can_edit": { + Description: "Users with view-only permission can edit but not save dashboards.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/influxdb.go b/internal/sdkprovider/userconfig/service/influxdb.go new file mode 100644 index 000000000..124adc203 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/influxdb.go @@ -0,0 +1,171 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func schemaInfluxdb() *schema.Schema { + return &schema.Schema{ + Description: "Influxdb user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "influxdb": { + Description: "influxdb.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "log_queries_after": { + Description: "The maximum duration in seconds before a query is logged as a slow query. Setting this to 0 (the default) will never log slow queries.", + Optional: true, + Type: schema.TypeInt, + }, + "max_connection_limit": { + Description: "Maximum number of connections to InfluxDB. Setting this to 0 (default) means no limit. If using max_connection_limit, it is recommended to set the value to be large enough in order to not block clients unnecessarily.", + Optional: true, + Type: schema.TypeInt, + }, + "max_row_limit": { + Description: "The maximum number of rows returned in a non-chunked query. Setting this to 0 (the default) allows an unlimited number to be returned.", + Optional: true, + Type: schema.TypeInt, + }, + "max_select_buckets": { + Description: "The maximum number of `GROUP BY time()` buckets that can be processed in a query. Setting this to 0 (the default) allows an unlimited number to be processed.", + Optional: true, + Type: schema.TypeInt, + }, + "max_select_point": { + Description: "The maximum number of points that can be processed in a SELECT statement. 
Setting this to 0 (the default) allows an unlimited number to be processed.", + Optional: true, + Type: schema.TypeInt, + }, + "query_log_enabled": { + Description: "Whether queries should be logged before execution. May log sensitive data contained within a query.", + Optional: true, + Type: schema.TypeBool, + }, + "query_timeout": { + Description: "The maximum duration in seconds before a query is killed. Setting this to 0 (the default) will never kill slow queries.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Enable influxdb.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { + Description: "Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka.go b/internal/sdkprovider/userconfig/service/kafka.go new file mode 100644 index 000000000..916e0cb84 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka.go @@ -0,0 +1,608 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaKafka() *schema.Schema { + return &schema.Schema{ + Description: "Kafka user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka": { + Description: "Kafka broker configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "auto_create_topics_enable": { + Description: "Enable auto creation of topics.", + Optional: true, + Type: schema.TypeBool, + }, + "compression_type": { + Description: "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). 
It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"}, false), + }, + "connections_max_idle_ms": { + Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.", + Optional: true, + Type: schema.TypeInt, + }, + "default_replication_factor": { + Description: "Replication factor for autocreated topics.", + Optional: true, + Type: schema.TypeInt, + }, + "group_initial_rebalance_delay_ms": { + Description: "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.", + Optional: true, + Type: schema.TypeInt, + }, + "group_max_session_timeout_ms": { + Description: "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + Optional: true, + Type: schema.TypeInt, + }, + "group_min_session_timeout_ms": { + Description: "The minimum allowed session timeout for registered consumers. 
Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_delete_retention_ms": { + Description: "How long are delete records retained?", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_max_compaction_lag_ms": { + Description: "The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleaner_min_cleanable_ratio": { + Description: "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.", + Optional: true, + Type: schema.TypeFloat, + }, + "log_cleaner_min_compaction_lag_ms": { + Description: "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.", + Optional: true, + Type: schema.TypeInt, + }, + "log_cleanup_policy": { + Description: "The default cleanup policy for segments beyond the retention window.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "compact", "compact,delete"}, false), + }, + "log_flush_interval_messages": { + Description: "The number of messages accumulated on a log partition before messages are flushed to disk.", + Optional: true, + Type: schema.TypeInt, + }, + "log_flush_interval_ms": { + Description: "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. 
If not set, the value in log.flush.scheduler.interval.ms is used.", + Optional: true, + Type: schema.TypeInt, + }, + "log_index_interval_bytes": { + Description: "The interval with which Kafka adds an entry to the offset index.", + Optional: true, + Type: schema.TypeInt, + }, + "log_index_size_max_bytes": { + Description: "The maximum size in bytes of the offset index.", + Optional: true, + Type: schema.TypeInt, + }, + "log_message_downconversion_enable": { + Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .", + Optional: true, + Type: schema.TypeBool, + }, + "log_message_timestamp_difference_max_ms": { + Description: "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.", + Optional: true, + Type: schema.TypeInt, + }, + "log_message_timestamp_type": { + Description: "Define whether the timestamp in the message is message create time or log append time.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"CreateTime", "LogAppendTime"}, false), + }, + "log_preallocate": { + Description: "Should pre allocate file when create new segment?", + Optional: true, + Type: schema.TypeBool, + }, + "log_retention_bytes": { + Description: "The maximum size of the log before deleting messages.", + Optional: true, + Type: schema.TypeInt, + }, + "log_retention_hours": { + Description: "The number of hours to keep a log file before deleting it.", + Optional: true, + Type: schema.TypeInt, + }, + "log_retention_ms": { + Description: "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.", + Optional: true, + Type: schema.TypeInt, + }, + "log_roll_jitter_ms": { + Description: "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used.", + Optional: true, + Type: schema.TypeInt, + }, + "log_roll_ms": { + Description: "The maximum time before a new log segment is rolled out (in milliseconds).", + Optional: true, + Type: schema.TypeInt, + }, + "log_segment_bytes": { + Description: "The maximum size of a single log file.", + Optional: true, + Type: schema.TypeInt, + }, + "log_segment_delete_delay_ms": { + Description: "The amount of time to wait before deleting a file from the filesystem.", + Optional: true, + Type: schema.TypeInt, + }, + "max_connections_per_ip": { + Description: "The maximum number of connections allowed from each ip address (defaults to 2147483647).", + Optional: true, + Type: schema.TypeInt, + }, + "max_incremental_fetch_session_cache_slots": { + Description: "The maximum number of incremental fetch sessions that the broker will maintain.", + Optional: true, + Type: schema.TypeInt, + }, + "message_max_bytes": { + Description: "The maximum size of message that the server can receive.", + Optional: true, + Type: schema.TypeInt, + }, + "min_insync_replicas": { + Description: "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.", + Optional: true, + Type: schema.TypeInt, + }, + "num_partitions": { + Description: "Number of partitions for autocreated topics.", + Optional: true, + Type: schema.TypeInt, + }, + "offsets_retention_minutes": { + Description: "Log retention window in minutes for offsets topic.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_purgatory_purge_interval_requests": { + Description: "The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).", + Optional: true, + Type: schema.TypeInt, + }, + "replica_fetch_max_bytes": { + Description: "The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). 
This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.", + Optional: true, + Type: schema.TypeInt, + }, + "replica_fetch_response_max_bytes": { + Description: "Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "socket_request_max_bytes": { + Description: "The maximum number of bytes in a socket request (defaults to 104857600).", + Optional: true, + Type: schema.TypeInt, + }, + "transaction_remove_expired_transaction_cleanup_interval_ms": { + Description: "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).", + Optional: true, + Type: schema.TypeInt, + }, + "transaction_state_log_segment_bytes": { + Description: "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_authentication_methods": { + Description: "Kafka authentication methods", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "certificate": { + Default: true, + Description: "Enable certificate/SSL authentication. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "sasl": { + Default: false, + Description: "Enable SASL authentication. 
The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_connect": { + Default: false, + Description: "Enable Kafka Connect service. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect_config": { + Description: "Kafka Connect configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connector_client_config_override_policy": { + Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + }, + "consumer_auto_offset_reset": { + Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), + }, + "consumer_fetch_max_bytes": { + Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_isolation_level": { + Description: "Transaction read isolation level. 
read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + }, + "consumer_max_partition_fetch_bytes": { + Description: "Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_interval_ms": { + Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_records": { + Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_interval_ms": { + Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_timeout_ms": { + Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_batch_size": { + Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. 
A batch size of zero will disable batching entirely (defaults to 16384).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_buffer_memory": { + Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", + Optional: true, + Type: schema.TypeInt, + }, + "scheduled_rebalance_max_delay_ms": { + Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. 
Defaults to 5 minutes.", + Optional: true, + Type: schema.TypeInt, + }, + "session_timeout_ms": { + Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_rest": { + Default: false, + Description: "Enable Kafka-REST service. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest_authorization": { + Description: "Enable authorization in Kafka-REST service.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest_config": { + Description: "Kafka REST configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "consumer_enable_auto_commit": { + Default: true, + Description: "If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "consumer_request_max_bytes": { + Default: 67108864, + Description: "Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_request_timeout_ms": { + Default: 1000, + Description: "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", + Optional: true, + Type: schema.TypeInt, + ValidateFunc: validation.IntInSlice([]int{1000, 15000, 30000}), + }, + "producer_acks": { + Default: "1", + Description: "The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. 
The default value is `1`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "-1", "0", "1"}, false), + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Default: 0, + Description: "Wait for up to the given delay to allow batching records together. The default value is `0`.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Default: 1048576, + Description: "The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.", + Optional: true, + Type: schema.TypeInt, + }, + "simpleconsumer_pool_size_max": { + Default: 25, + Description: "Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "kafka_version": { + Description: "Kafka major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"3.2", "3.3", "3.1", "3.4", "3.5"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka": { + Description: "Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "jolokia": { + Description: "Enable jolokia.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka": { + Description: "Enable kafka.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Enable kafka_connect.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Enable kafka_rest.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Enable schema_registry.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: 
schema.TypeList, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka": { + Description: "Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_rest": { + Description: "Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry": { + Description: "Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "schema_registry": { + Default: false, + Description: "Enable Schema-Registry service. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "schema_registry_config": { + Description: "Schema Registry configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "leader_eligibility": { + Description: "If true, Karapace / Schema Registry on the service nodes can participate in leader election. 
It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "topic_name": { + Description: "The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to `_schemas`.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka_connect.go b/internal/sdkprovider/userconfig/service/kafka_connect.go new file mode 100644 index 000000000..85dae72b1 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka_connect.go @@ -0,0 +1,227 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaKafkaConnect() *schema.Schema { + return &schema.Schema{ + Description: "KafkaConnect user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka_connect": { + Description: "Kafka Connect configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connector_client_config_override_policy": { + Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + }, + "consumer_auto_offset_reset": { + Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), + }, + "consumer_fetch_max_bytes": { + Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_isolation_level": { + Description: "Transaction read isolation level. 
read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + }, + "consumer_max_partition_fetch_bytes": { + Description: "Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_interval_ms": { + Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", + Optional: true, + Type: schema.TypeInt, + }, + "consumer_max_poll_records": { + Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_interval_ms": { + Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", + Optional: true, + Type: schema.TypeInt, + }, + "offset_flush_timeout_ms": { + Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_batch_size": { + Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. 
A batch size of zero will disable batching entirely (defaults to 16384).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_buffer_memory": { + Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", + Optional: true, + Type: schema.TypeInt, + }, + "producer_compression_type": { + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + }, + "producer_linger_ms": { + Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", + Optional: true, + Type: schema.TypeInt, + }, + "producer_max_request_size": { + Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", + Optional: true, + Type: schema.TypeInt, + }, + "scheduled_rebalance_max_delay_ms": { + Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. 
Defaults to 5 minutes.", + Optional: true, + Type: schema.TypeInt, + }, + "session_timeout_ms": { + Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "jolokia": { + Description: "Enable jolokia.", + Optional: true, + Type: schema.TypeBool, + }, + "kafka_connect": { + Description: "Enable kafka_connect.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "kafka_connect": { + Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: 
schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go new file mode 100644 index 000000000..7a66c0c75 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/kafka_mirrormaker.go @@ -0,0 +1,132 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func schemaKafkaMirrormaker() *schema.Schema { + return &schema.Schema{ + Description: "KafkaMirrormaker user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "kafka_mirrormaker": { + Description: "Kafka MirrorMaker configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "emit_checkpoints_enabled": { + Description: "Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).", + Optional: true, + Type: schema.TypeBool, + }, + "emit_checkpoints_interval_seconds": { + Description: "Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute).", + Optional: true, + Type: schema.TypeInt, + }, + "refresh_groups_enabled": { + Description: "Whether to periodically check for new consumer groups. Defaults to 'true'.", + Optional: true, + Type: schema.TypeBool, + }, + "refresh_groups_interval_seconds": { + Description: "Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).", + Optional: true, + Type: schema.TypeInt, + }, + "refresh_topics_enabled": { + Description: "Whether to periodically check for new topics and partitions. Defaults to 'true'.", + Optional: true, + Type: schema.TypeBool, + }, + "refresh_topics_interval_seconds": { + Description: "Frequency of topic and partitions refresh in seconds. 
Defaults to 600 seconds (10 minutes).", + Optional: true, + Type: schema.TypeInt, + }, + "sync_group_offsets_enabled": { + Description: "Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.", + Optional: true, + Type: schema.TypeBool, + }, + "sync_group_offsets_interval_seconds": { + Description: "Frequency at which consumer group offsets are synced (default: 60, every minute).", + Optional: true, + Type: schema.TypeInt, + }, + "sync_topic_configs_enabled": { + Description: "Whether to periodically configure remote topics to match their corresponding upstream topics.", + Optional: true, + Type: schema.TypeBool, + }, + "tasks_max_per_cpu": { + Default: 1, + Description: "'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/m3aggregator.go b/internal/sdkprovider/userconfig/service/m3aggregator.go new file mode 100644 index 000000000..d33ad6027 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/m3aggregator.go @@ -0,0 +1,83 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaM3aggregator() *schema.Schema { + return &schema.Schema{ + Description: "M3aggregator user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "m3_version": { + Description: "M3 major version (deprecated, use m3aggregator_version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "m3aggregator_version": { + Description: "M3 major version (the minimum compatible version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/m3db.go b/internal/sdkprovider/userconfig/service/m3db.go new file mode 100644 index 000000000..ea593d553 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/m3db.go @@ -0,0 +1,366 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaM3db() *schema.Schema { + return &schema.Schema{ + Description: "M3db user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "limits": { + Description: "M3 limits", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "max_recently_queried_series_blocks": { + Description: "The maximum number of blocks that can be read in a given lookback period.", + Optional: true, + Type: schema.TypeInt, + }, + "max_recently_queried_series_disk_bytes_read": { + Description: "The maximum number of disk bytes that can be read in a given lookback period.", + Optional: true, + Type: schema.TypeInt, + }, + "max_recently_queried_series_lookback": { + Description: "The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.", + Optional: true, + Type: schema.TypeString, + }, + "query_docs": { + Description: "The maximum number of docs fetched in single query.", + Optional: true, + Type: schema.TypeInt, + }, + "query_require_exhaustive": { + Description: "When query limits are exceeded, whether to return error or return partial results.", + Optional: true, + Type: schema.TypeBool, + }, + "query_series": { + Description: "The maximum number of series fetched in single query.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "m3": { + Description: "M3 specific configuration options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"tag_options": { + Description: "M3 Tag Options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allow_tag_name_duplicates": { + Description: "Allows for duplicate tags to appear on series (not allowed by default).", + Optional: true, + Type: schema.TypeBool, + }, + "allow_tag_value_empty": { + Description: "Allows for empty tags to appear on 
series (not allowed by default).", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "m3_version": { + Description: "M3 major version (deprecated, use m3db_version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "m3coordinator_enable_graphite_carbon_ingest": { + Description: "Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.", + Optional: true, + Type: schema.TypeBool, + }, + "m3db_version": { + Description: "M3 major version (the minimum compatible version).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), + }, + "namespaces": { + Description: "List of M3 namespaces", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the namespace.", + Required: true, + Type: schema.TypeString, + }, + "options": { + Description: "Namespace options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "retention_options": { + Description: "Retention options", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "block_data_expiry_duration": { + Description: "Controls how long we wait before expiring stale data.", + Optional: true, + Type: schema.TypeString, + }, + "blocksize_duration": { + Description: "Controls how long to keep a block in memory before flushing to a fileset on disk.", + Optional: true, + Type: schema.TypeString, + }, + "buffer_future_duration": { + Description: "Controls how far into the future writes to the namespace will be accepted.", + Optional: true, + Type: schema.TypeString, + }, + "buffer_past_duration": { + Description: "Controls how far into the past writes to the namespace 
will be accepted.", + Optional: true, + Type: schema.TypeString, + }, + "retention_period_duration": { + Description: "Controls the duration of time that M3DB will retain data for the namespace.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Required: true, + Type: schema.TypeList, + }, + "snapshot_enabled": { + Description: "Controls whether M3DB will create snapshot files for this namespace.", + Optional: true, + Type: schema.TypeBool, + }, + "writes_to_commitlog": { + Description: "Controls whether M3DB will include writes to this namespace in the commitlog.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "resolution": { + Description: "The resolution for an aggregated namespace.", + Optional: true, + Type: schema.TypeString, + }, + "type": { + Description: "The type of aggregation (aggregated/unaggregated).", + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"aggregated", "unaggregated"}, false), + }, + }}, + MaxItems: 2147483647, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { + Description: "Allow clients to connect to m3coordinator with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { + Description: "Allow clients to connect to m3coordinator from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "rules": { + Description: "M3 rules", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"mapping": { + Description: "List of M3 mapping rules", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "aggregations": { + Description: "List of aggregations to be applied.", + Elem: &schema.Schema{ + Description: "Aggregation to be applied.", + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"Count", "Last", "Max", "Mean", "Median", "Min", "P10", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "P90", "P95", "P99", "P999", "P9999", "Stdev", "Sum", "SumSq"}, false), + }, + MaxItems: 10, + Optional: true, + Type: schema.TypeSet, + }, + "drop": { + Description: "Only store the derived metric (as specified in the roll-up rules), if any.", + Optional: true, + Type: schema.TypeBool, + }, + "filter": { + Description: "Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.", + Required: true, + Type: schema.TypeString, + }, + "name": { + Description: "The (optional) name of the rule.", + Optional: true, + Type: schema.TypeString, + }, + "namespaces": { + Deprecated: "Deprecated. 
Use `namespaces_string` instead.", + Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards).", + Elem: &schema.Schema{ + Description: "Filter the namespace by glob (=wildcards).", + Type: schema.TypeString, + }, + MaxItems: 10, + Optional: true, + Type: schema.TypeSet, + }, + "namespaces_object": { + Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "resolution": { + Description: "The resolution for the matching namespace.", + Required: true, + Type: schema.TypeString, + }, + "retention": { + Description: "The retention period of the matching namespace.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 10, + Optional: true, + Type: schema.TypeList, + }, + "namespaces_string": { + Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. 
Filter the namespace by glob (=wildcards).", + Elem: &schema.Schema{ + Description: "Filter the namespace by glob (=wildcards).", + Type: schema.TypeString, + }, + MaxItems: 10, + Optional: true, + Type: schema.TypeSet, + }, + "tags": { + Description: "List of tags to be appended to matching metrics", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "name": { + Description: "Name of the tag.", + Required: true, + Type: schema.TypeString, + }, + "value": { + Description: "Value of the tag.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 10, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 10, + Optional: true, + Type: schema.TypeList, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/mysql.go b/internal/sdkprovider/userconfig/service/mysql.go new file mode 100644 index 000000000..c2b8927c0 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/mysql.go @@ -0,0 +1,407 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaMysql() *schema.Schema { + return &schema.Schema{ + Description: "Mysql user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "admin_password": { + Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "admin_username": { + Description: "Custom username for admin user. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "binlog_retention_period": { + Description: "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + Optional: true, + Type: schema.TypeInt, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: 
validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "mysql": { + Description: "mysql.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "connect_timeout": { + Description: "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake.", + Optional: true, + Type: schema.TypeInt, + }, + "default_time_zone": { + Description: "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + Optional: true, + Type: schema.TypeString, + }, + "group_concat_max_len": { + Description: "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + Optional: true, + Type: schema.TypeInt, + }, + "information_schema_stats_expiry": { + Description: "The time, in seconds, before cached statistics expire.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_change_buffer_max_size": { + Description: "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. 
Default is 25.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_flush_neighbors": { + Description: "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_ft_min_token_size": { + Description: "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_ft_server_stopword_table": { + Description: "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + Optional: true, + Type: schema.TypeString, + }, + "innodb_lock_wait_timeout": { + Description: "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_log_buffer_size": { + Description: "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_online_alter_log_max_size": { + Description: "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_print_all_deadlocks": { + Description: "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", + Optional: true, + Type: schema.TypeBool, + }, + "innodb_read_io_threads": { + Description: "The number of I/O threads for read operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_rollback_on_timeout": { + Description: "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeBool, + }, + "innodb_thread_concurrency": { + Description: "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).", + Optional: true, + Type: schema.TypeInt, + }, + "innodb_write_io_threads": { + Description: "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "interactive_timeout": { + Description: "The number of seconds the server waits for activity on an interactive connection before closing it.", + Optional: true, + Type: schema.TypeInt, + }, + "internal_tmp_mem_storage_engine": { + Description: "The storage engine for in-memory internal temporary tables.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"TempTable", "MEMORY"}, false), + }, + "long_query_time": { + Description: "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.", + Optional: true, + Type: schema.TypeFloat, + }, + "max_allowed_packet": { + Description: "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).", + Optional: true, + Type: schema.TypeInt, + }, + "max_heap_table_size": { + Description: "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).", + Optional: true, + Type: schema.TypeInt, + }, + "net_buffer_length": { + Description: "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", + Optional: true, + Type: schema.TypeInt, + }, + "net_read_timeout": { + Description: "The number of seconds to wait for more data from a connection before aborting the read.", + Optional: true, + Type: schema.TypeInt, + }, + "net_write_timeout": { + Description: "The number of seconds to wait for a block to be written to a connection before aborting the write.", + Optional: true, + Type: schema.TypeInt, + }, + "slow_query_log": { + Description: "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.", + Optional: true, + Type: schema.TypeBool, + }, + "sort_buffer_size": { + Description: "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).", + Optional: true, + Type: schema.TypeInt, + }, + "sql_mode": { + Description: "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.", + Optional: true, + Type: schema.TypeString, + }, + "sql_require_primary_key": { + Description: "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + Optional: true, + Type: schema.TypeBool, + }, + "tmp_table_size": { + Description: "Limits the size of internal in-memory tables. Also set max_heap_table_size. 
Default is 16777216 (16M).", + Optional: true, + Type: schema.TypeInt, + }, + "wait_timeout": { + Description: "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "mysql_version": { + Description: "MySQL major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"8"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Allow clients to connect to mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Allow clients to connect to mysqlx with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Enable mysql.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Enable mysqlx.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mysql": { + Description: "Allow clients to connect to mysql from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "mysqlx": { + Description: "Allow clients to connect to mysqlx from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_target_time": { + Description: "Recovery target time when forking a service. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go new file mode 100644 index 000000000..8a21d5a8a --- /dev/null +++ b/internal/sdkprovider/userconfig/service/opensearch.go @@ -0,0 +1,547 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaOpensearch() *schema.Schema { + return &schema.Schema{ + Description: "Opensearch user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "custom_domain": { + Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", + Optional: true, + Type: schema.TypeString, + }, + "disable_replication_factor_adjustment": { + Description: "Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. 
Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.", + Optional: true, + Type: schema.TypeBool, + }, + "index_patterns": { + Description: "Index patterns", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "max_index_count": { + Description: "Maximum number of indexes to keep.", + Required: true, + Type: schema.TypeInt, + }, + "pattern": { + Description: "fnmatch pattern.", + Required: true, + Type: schema.TypeString, + }, + "sorting_algorithm": { + Default: "creation_date", + Description: "Deletion sorting algorithm. The default value is `creation_date`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"alphabetical", "creation_date"}, false), + }, + }}, + MaxItems: 512, + Optional: true, + Type: schema.TypeList, + }, + "index_template": { + Description: "Template settings for all new indexes", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "mapping_nested_objects_limit": { + Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", + Optional: true, + Type: schema.TypeInt, + }, + "number_of_replicas": { + Description: "The number of replicas each primary shard has.", + Optional: true, + Type: schema.TypeInt, + }, + "number_of_shards": { + Description: "The number of primary shards that an index should have.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "keep_index_refresh_interval": { + Description: "Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.", + Optional: true, + Type: schema.TypeBool, + }, + "max_index_count": { + Default: 0, + Description: "use index_patterns instead. The default value is `0`.", + Optional: true, + Type: schema.TypeInt, + }, + "openid": { + Description: "OpenSearch OpenID Connect Configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "client_id": { + Description: "The ID of the OpenID Connect client configured in your IdP. 
Required.", + Required: true, + Type: schema.TypeString, + }, + "client_secret": { + Description: "The client secret of the OpenID Connect client configured in your IdP. Required.", + Required: true, + Type: schema.TypeString, + }, + "connect_url": { + Description: "The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings.", + Required: true, + Type: schema.TypeString, + }, + "enabled": { + Default: true, + Description: "Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "header": { + Default: "Authorization", + Description: "HTTP header name of the JWT token. Optional. Default is Authorization. The default value is `Authorization`.", + Optional: true, + Type: schema.TypeString, + }, + "jwt_header": { + Description: "The HTTP header that stores the token. Typically the Authorization header with the Bearer schema: Authorization: Bearer . Optional. Default is Authorization.", + Optional: true, + Type: schema.TypeString, + }, + "jwt_url_parameter": { + Description: "If the token is not transmitted in the HTTP header, but as an URL parameter, define the name of the parameter here. Optional.", + Optional: true, + Type: schema.TypeString, + }, + "refresh_rate_limit_count": { + Default: 10, + Description: "The maximum number of unknown key IDs in the time frame. Default is 10. Optional. The default value is `10`.", + Optional: true, + Type: schema.TypeInt, + }, + "refresh_rate_limit_time_window_ms": { + Default: 10000, + Description: "The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. Optional.Default is 10000 (10 seconds). The default value is `10000`.", + Optional: true, + Type: schema.TypeInt, + }, + "roles_key": { + Description: "The key in the JSON payload that stores the user’s roles. 
The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.", + Optional: true, + Type: schema.TypeString, + }, + "scope": { + Description: "The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.", + Optional: true, + Type: schema.TypeString, + }, + "subject_key": { + Description: "The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "opensearch": { + Description: "OpenSearch settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "action_auto_create_index_enabled": { + Description: "Explicitly allow or block automatic creation of indices. Defaults to true.", + Optional: true, + Type: schema.TypeBool, + }, + "action_destructive_requires_name": { + Description: "Require explicit index names when deleting.", + Optional: true, + Type: schema.TypeBool, + }, + "cluster_max_shards_per_node": { + Description: "Controls the number of shards allowed in the cluster per data node.", + Optional: true, + Type: schema.TypeInt, + }, + "cluster_routing_allocation_node_concurrent_recoveries": { + Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. 
Defaults to 2.", + Optional: true, + Type: schema.TypeInt, + }, + "email_sender_name": { + Description: "Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore.", + Optional: true, + Type: schema.TypeString, + }, + "email_sender_password": { + Description: "Sender password for Opensearch alerts to authenticate with SMTP server.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "email_sender_username": { + Description: "Sender username for Opensearch alerts.", + Optional: true, + Type: schema.TypeString, + }, + "http_max_content_length": { + Description: "Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "http_max_header_size": { + Description: "The max size of allowed headers, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "http_max_initial_line_length": { + Description: "The max length of an HTTP URL, in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_fielddata_cache_size": { + Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_memory_index_buffer_size": { + Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_queries_cache_size": { + Description: "Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. 
Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_query_bool_max_clause_count": { + Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_recovery_max_bytes_per_sec": { + Description: "Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", + Optional: true, + Type: schema.TypeInt, + }, + "indices_recovery_max_concurrent_file_chunks": { + Description: "Number of file chunks sent in parallel for each recovery. Defaults to 2.", + Optional: true, + Type: schema.TypeInt, + }, + "override_main_response_version": { + Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", + Optional: true, + Type: schema.TypeBool, + }, + "reindex_remote_whitelist": { + Description: "Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.", + Elem: &schema.Schema{ + Description: "Address (hostname:port or IP:port).", + Type: schema.TypeString, + }, + MaxItems: 32, + Optional: true, + Type: schema.TypeSet, + }, + "script_max_compilations_rate": { + Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", + Optional: true, + Type: schema.TypeString, + }, + "search_max_buckets": { + Description: "Maximum number of aggregation buckets allowed in a single response. 
OpenSearch default value is used when this is not defined.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_analyze_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_analyze_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_force_merge_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_get_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_get_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_throttled_queue_size": { + Description: "Size for the thread pool queue. 
See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_search_throttled_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_write_queue_size": { + Description: "Size for the thread pool queue. See documentation for exact details.", + Optional: true, + Type: schema.TypeInt, + }, + "thread_pool_write_size": { + Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "opensearch_dashboards": { + Description: "OpenSearch Dashboards settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Default: true, + Description: "Enable or disable OpenSearch Dashboards. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "max_old_space_size": { + Default: 128, + Description: "Limits the maximum amount of memory (in MiB) the OpenSearch Dashboards process can use. This sets the max_old_space_size option of the nodejs running the OpenSearch Dashboards. Note: the memory reserved by OpenSearch Dashboards is not available for OpenSearch. The default value is `128`.", + Optional: true, + Type: schema.TypeInt, + }, + "opensearch_request_timeout": { + Default: 30000, + Description: "Timeout in milliseconds for requests made by OpenSearch Dashboards towards OpenSearch. 
The default value is `30000`.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "opensearch_version": { + Description: "OpenSearch major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"1", "2"}, false), + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Enable opensearch.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Enable opensearch_dashboards.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "opensearch": { + Description: "Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "opensearch_dashboards": { + Description: "Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "saml": { + Description: "OpenSearch SAML configuration", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Description: "Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.", + Required: true, + Type: schema.TypeBool, + }, + "idp_entity_id": { + Description: "The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.", + Required: true, + Type: schema.TypeString, + }, + "idp_metadata_url": { + Description: "The URL of the SAML metadata for the Identity Provider (IdP). 
This is used to configure SAML-based authentication with the IdP.", + Required: true, + Type: schema.TypeString, + }, + "idp_pemtrustedcas_content": { + Description: "This parameter specifies the PEM-encoded root certificate authority (CA) content for the SAML identity provider (IdP) server verification. The root CA content is used to verify the SSL/TLS certificate presented by the server.", + Optional: true, + Type: schema.TypeString, + }, + "roles_key": { + Description: "Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.", + Optional: true, + Type: schema.TypeString, + }, + "sp_entity_id": { + Description: "The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.", + Required: true, + Type: schema.TypeString, + }, + "subject_key": { + Description: "Optional. Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/pg.go b/internal/sdkprovider/userconfig/service/pg.go new file mode 100644 index 000000000..ebb59ecfb --- /dev/null +++ b/internal/sdkprovider/userconfig/service/pg.go @@ -0,0 +1,630 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaPg() *schema.Schema { + return &schema.Schema{ + Description: "Pg user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "admin_password": { + Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "admin_username": { + Description: "Custom username for admin user. This must be set only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "backup_hour": { + Description: "The hour of day (in UTC) when backup for the service is started. 
New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "backup_minute": { + Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", + Optional: true, + Type: schema.TypeInt, + }, + "enable_ipv6": { + Description: "Register AAAA DNS records for the service, and allow IPv6 packets to service ports.", + Optional: true, + Type: schema.TypeBool, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pg": { + Description: "postgresql.conf configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "autovacuum_analyze_scale_factor": { + Description: "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).", + Optional: true, + Type: schema.TypeFloat, + }, + "autovacuum_analyze_threshold": { + Description: "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_freeze_max_age": { + Description: "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_max_workers": { + Description: "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_naptime": { + Description: "Specifies the minimum delay between autovacuum runs on any given database. 
The delay is measured in seconds, and the default is one minute.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_vacuum_cost_delay": { + Description: "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_vacuum_cost_limit": { + Description: "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + Optional: true, + Type: schema.TypeInt, + }, + "autovacuum_vacuum_scale_factor": { + Description: "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).", + Optional: true, + Type: schema.TypeFloat, + }, + "autovacuum_vacuum_threshold": { + Description: "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.", + Optional: true, + Type: schema.TypeInt, + }, + "bgwriter_delay": { + Description: "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + Optional: true, + Type: schema.TypeInt, + }, + "bgwriter_flush_after": { + Description: "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + Optional: true, + Type: schema.TypeInt, + }, + "bgwriter_lru_maxpages": { + Description: "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. 
Default is 100.", + Optional: true, + Type: schema.TypeInt, + }, + "bgwriter_lru_multiplier": { + Description: "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + Optional: true, + Type: schema.TypeFloat, + }, + "deadlock_timeout": { + Description: "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + Optional: true, + Type: schema.TypeInt, + }, + "default_toast_compression": { + Description: "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"lz4", "pglz"}, false), + }, + "idle_in_transaction_session_timeout": { + Description: "Time out sessions with open transactions after this number of milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "jit": { + Description: "Controls system-wide use of Just-in-Time Compilation (JIT).", + Optional: true, + Type: schema.TypeBool, + }, + "log_autovacuum_min_duration": { + Description: "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. 
Minus-one (the default) disables logging autovacuum actions.", + Optional: true, + Type: schema.TypeInt, + }, + "log_error_verbosity": { + Description: "Controls the amount of detail written in the server log for each message that is logged.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"TERSE", "DEFAULT", "VERBOSE"}, false), + }, + "log_line_prefix": { + Description: "Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'%m [%p] %q[user=%u,db=%d,app=%a] '"}, false), + }, + "log_min_duration_statement": { + Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", + Optional: true, + Type: schema.TypeInt, + }, + "log_temp_files": { + Description: "Log statements for each temporary file created larger than this number of kilobytes, -1 disables.", + Optional: true, + Type: schema.TypeInt, + }, + "max_files_per_process": { + Description: "PostgreSQL maximum number of files that can be open per process.", + Optional: true, + Type: schema.TypeInt, + }, + "max_locks_per_transaction": { + Description: "PostgreSQL maximum locks per transaction.", + Optional: true, + Type: schema.TypeInt, + }, + "max_logical_replication_workers": { + Description: "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers).", + Optional: true, + Type: schema.TypeInt, + }, + "max_parallel_workers": { + Description: "Sets the maximum number of workers that the system can support for parallel queries.", + Optional: true, + Type: schema.TypeInt, + }, + "max_parallel_workers_per_gather": { + Description: "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node.", + Optional: true, 
+ Type: schema.TypeInt, + }, + "max_pred_locks_per_transaction": { + Description: "PostgreSQL maximum predicate locks per transaction.", + Optional: true, + Type: schema.TypeInt, + }, + "max_prepared_transactions": { + Description: "PostgreSQL maximum prepared transactions.", + Optional: true, + Type: schema.TypeInt, + }, + "max_replication_slots": { + Description: "PostgreSQL maximum replication slots.", + Optional: true, + Type: schema.TypeInt, + }, + "max_slot_wal_keep_size": { + Description: "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.", + Optional: true, + Type: schema.TypeInt, + }, + "max_stack_depth": { + Description: "Maximum depth of the stack in bytes.", + Optional: true, + Type: schema.TypeInt, + }, + "max_standby_archive_delay": { + Description: "Max standby archive delay in milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "max_standby_streaming_delay": { + Description: "Max standby streaming delay in milliseconds.", + Optional: true, + Type: schema.TypeInt, + }, + "max_wal_senders": { + Description: "PostgreSQL maximum WAL senders.", + Optional: true, + Type: schema.TypeInt, + }, + "max_worker_processes": { + Description: "Sets the maximum number of background processes that the system can support.", + Optional: true, + Type: schema.TypeInt, + }, + "pg_partman_bgw__dot__interval": { + Description: "Sets the time interval to run pg_partman's scheduled tasks.", + Optional: true, + Type: schema.TypeInt, + }, + "pg_partman_bgw__dot__role": { + Description: "Controls which role to use for pg_partman's scheduled background tasks.", + Optional: true, + Type: schema.TypeString, + }, + "pg_stat_monitor__dot__pgsm_enable_query_plan": { + Description: "Enables or disables query plan monitoring.", + Optional: true, + Type: schema.TypeBool, + }, + "pg_stat_monitor__dot__pgsm_max_buckets": { + Description: "Sets the maximum number of 
buckets .", + Optional: true, + Type: schema.TypeInt, + }, + "pg_stat_statements__dot__track": { + Description: "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "top", "none"}, false), + }, + "temp_file_limit": { + Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited.", + Optional: true, + Type: schema.TypeInt, + }, + "timezone": { + Description: "PostgreSQL service timezone.", + Optional: true, + Type: schema.TypeString, + }, + "track_activity_query_size": { + Description: "Specifies the number of bytes reserved to track the currently executing command for each active session.", + Optional: true, + Type: schema.TypeInt, + }, + "track_commit_timestamp": { + Description: "Record commit time of transactions.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), + }, + "track_functions": { + Description: "Enables tracking of function call counts and time used.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"all", "pl", "none"}, false), + }, + "track_io_timing": { + Description: "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), + }, + "wal_sender_timeout": { + Description: "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. 
Setting this value to zero disables the timeout.", + Optional: true, + Type: schema.TypeInt, + }, + "wal_writer_delay": { + Description: "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pg_read_replica": { + Description: "Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).", + Optional: true, + Type: schema.TypeBool, + }, + "pg_service_to_fork_from": { + Description: "Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "pg_stat_monitor_enable": { + Default: false, + Description: "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.", + Optional: true, + Type: schema.TypeBool, + }, + "pg_version": { + Description: "PostgreSQL major version.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"11", "12", "13", "14", "15", "10"}, false), + }, + "pgbouncer": { + Description: "PGBouncer connection pooling settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "autodb_idle_timeout": { + Description: "If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "autodb_max_db_connections": { + Description: "Do not allow more than this many server connections per database (regardless of user). 
Setting it to 0 means unlimited.", + Optional: true, + Type: schema.TypeInt, + }, + "autodb_pool_mode": { + Description: "PGBouncer pool mode.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"session", "transaction", "statement"}, false), + }, + "autodb_pool_size": { + Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist.", + Optional: true, + Type: schema.TypeInt, + }, + "ignore_startup_parameters": { + Description: "List of parameters to ignore when given in startup packet.", + Elem: &schema.Schema{ + Description: "Enum of parameters to ignore when given in startup packet.", + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"extra_float_digits", "search_path"}, false), + }, + MaxItems: 32, + Optional: true, + Type: schema.TypeSet, + }, + "min_pool_size": { + Description: "Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size.", + Optional: true, + Type: schema.TypeInt, + }, + "server_idle_timeout": { + Description: "If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "server_lifetime": { + Description: "The pooler will close an unused server connection that has been connected longer than this. 
(seconds).", + Optional: true, + Type: schema.TypeInt, + }, + "server_reset_query_always": { + Description: "Run server_reset_query (DISCARD ALL) in all pooling modes.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "pglookout": { + Description: "PGLookout settings", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_failover_replication_time_lag": { + Default: 60, + Description: "Number of seconds of master unavailability before triggering database failover to standby. The default value is `60`.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Enable pg.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Enable pgbouncer.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "pg": { + Description: "Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "pgbouncer": { + Description: "Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_target_time": { + Description: "Recovery target time when forking a service. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "shared_buffers_percentage": { + Description: "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + Optional: true, + Type: schema.TypeFloat, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + "synchronous_replication": { + Description: "Synchronous replication type. Note that the service plan also needs to support synchronous replication.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"quorum", "off"}, false), + }, + "timescaledb": { + Description: "TimescaleDB extension configuration values", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_background_workers": { + Description: "The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "variant": { + Description: "Variant of the PostgreSQL service, may affect the features that are exposed by default.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"aiven", "timescale"}, false), + }, + "work_mem": { + Description: "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + Optional: true, + Type: schema.TypeInt, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/redis.go b/internal/sdkprovider/userconfig/service/redis.go new file mode 100644 index 000000000..7dfbbb811 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/redis.go @@ -0,0 +1,261 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func schemaRedis() *schema.Schema { + return &schema.Schema{ + Description: "Redis user configurable settings", + DiffSuppressFunc: diffSuppressUnchanged, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{ + Description: "Target cloud.", + Type: schema.TypeString, + }, + MaxItems: 1, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter": { + Deprecated: "Deprecated. Use `ip_filter_string` instead.", + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "ip_filter_object": { + Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "description": { + Description: "Description for IP filter list entry.", + Optional: true, + Type: schema.TypeString, + }, + "network": { + Description: "CIDR address block.", + Required: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1024, + Optional: true, + Type: schema.TypeList, + }, + "ip_filter_string": { + Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", + DiffSuppressFunc: diffSuppressIpFilter, + Elem: &schema.Schema{ + Description: "CIDR address block, either as a string, or in a dict with an optional description field.", + Type: schema.TypeString, + }, + MaxItems: 1024, + Optional: true, + Type: schema.TypeSet, + }, + "migration": { + Description: "Migrate data from existing server", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "dbname": { + Description: "Database name for bootstrapping the initial connection.", + Optional: true, + Type: schema.TypeString, + }, + "host": { + Description: "Hostname or IP address of the server where to migrate data from.", + Required: true, + Type: schema.TypeString, + }, + "ignore_dbs": { + Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", + Optional: true, + Type: schema.TypeString, + }, + "method": { + Description: "The migration method to be used (currently supported only by Redis, MySQL and PostgreSQL service types).", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), + }, + "password": { + Description: "Password for authentication with the server where to migrate data from.", + Optional: true, + Sensitive: true, + Type: schema.TypeString, + }, + "port": { + Description: "Port number of the server where to migrate data from.", + Required: true, + Type: schema.TypeInt, + }, + "ssl": { + Default: true, + 
Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "username": { + Description: "User name for authentication with the server where to migrate data from.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "private_access": { + Description: "Allow access to selected service ports from private networks", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Allow clients to connect to redis with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "privatelink_access": { + Description: "Allow access to selected service components through Privatelink", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Enable prometheus.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Enable redis.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "project_to_fork_from": { + Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "public_access": { + Description: "Allow access to selected service ports from the public Internet", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "prometheus": { + Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + "redis": { + Description: "Allow clients to connect to redis from the public internet for service nodes that are in a project VPC or another type of private network.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "recovery_basebackup_name": { + Description: "Name of the basebackup to restore in forked service.", + Optional: true, + Type: schema.TypeString, + }, + "redis_acl_channels_default": { + Description: "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"allchannels", "resetchannels"}, false), + }, + "redis_io_threads": { + Description: "Set Redis IO thread count. Changing this will cause a restart of the Redis service.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_lfu_decay_time": { + Default: 1, + Description: "LFU maxmemory-policy counter decay time in minutes. The default value is `1`.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_lfu_log_factor": { + Default: 10, + Description: "Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. 
The default value is `10`.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_maxmemory_policy": { + Default: "noeviction", + Description: "Redis maxmemory-policy. The default value is `noeviction`.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false), + }, + "redis_notify_keyspace_events": { + Description: "Set notify-keyspace-events option.", + Optional: true, + Type: schema.TypeString, + }, + "redis_number_of_databases": { + Description: "Set number of Redis databases. Changing this will cause a restart of the Redis service.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_persistence": { + Description: "When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"off", "rdb"}, false), + }, + "redis_pubsub_client_output_buffer_limit": { + Description: "Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.", + Optional: true, + Type: schema.TypeInt, + }, + "redis_ssl": { + Default: true, + Description: "Require SSL to access Redis. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "redis_timeout": { + Default: 300, + Description: "Redis idle connection timeout in seconds. 
The default value is `300`.", + Optional: true, + Type: schema.TypeInt, + }, + "service_to_fork_from": { + Description: "Name of another service to fork from. This has effect only when a new service is being created.", + ForceNew: true, + Optional: true, + Type: schema.TypeString, + }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + } +} diff --git a/internal/sdkprovider/userconfig/service/service.go b/internal/sdkprovider/userconfig/service/service.go new file mode 100644 index 000000000..da8cc8aa1 --- /dev/null +++ b/internal/sdkprovider/userconfig/service/service.go @@ -0,0 +1,99 @@ +// Code generated by user config generator. DO NOT EDIT. + +package service + +import ( + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func GetUserConfig(kind string) *schema.Schema { + switch kind { + case "cassandra": + return schemaCassandra() + case "clickhouse": + return schemaClickhouse() + case "flink": + return schemaFlink() + case "grafana": + return schemaGrafana() + case "influxdb": + return schemaInfluxdb() + case "kafka": + return schemaKafka() + case "kafka_connect": + return schemaKafkaConnect() + case "kafka_mirrormaker": + return schemaKafkaMirrormaker() + case "m3aggregator": + return schemaM3aggregator() + case "m3db": + return schemaM3db() + case "mysql": + return schemaMysql() + case "opensearch": + return schemaOpensearch() + case "pg": + return schemaPg() + case "redis": + return schemaRedis() + default: + panic("unknown user config type: " + kind) + } +} +func strContains(s, substr string) bool { + return strings.Contains(s, substr) +} + +var reSetElement = regexp.MustCompile("\\.[0-9]{9}$") + +func diffSuppressUnchanged(k, old, new string, d *schema.ResourceData) bool { + // Lists, sets and objects (object is list with one item). 
+ if k[len(k)-1:] == "#" { + if d.HasChange(k) { + // By some reason terraform might mark objects as "changed". + // In that case we perform this check manually. + // "nil" means it doesn't have changed fields + key := strings.TrimSuffix(k, ".#") + v, ok := d.Get(key).([]any) + return ok && len(v) == 1 && v[0] == nil + } + return true + } + + // Ip filter items handled with a special suppressor. + if strContains(k, ".ip_filter.") || strContains(k, ".ip_filter_string.") { + return diffSuppressIpFilter(k, old, new, d) + } + + // Doesn't suppress "set" items. + // Set item ends with a 9 length hash int. + if reSetElement.MatchString(k) { + return false + } + + // Object properties. + // "old" — is something read from API + // "new" — is what read from tf file + // If value is computed it has non-empty old (any value) and empty "new"". + switch new { + case "", "0", "false": + // "default" value, that is removed: + // "" — kafka_version = "3.5" -> "" + // 0 — backup_hour = "4" -> 0 + // false — allow_sign_up = true -> false + return !d.HasChange(k) + } + return false +} + +func diffSuppressIpFilter(k, old, new string, d *schema.ResourceData) bool { + // Suppresses ip_filter = [0.0.0.0/0] + path := strings.Split(k, ".") + // Turns ~ip_filter.1234 to ~ip_filter.# + v, ok := d.GetOk(strings.Join(path[:len(path)-1], ".") + ".#") + // Literally, if value is "0.0.0.0/0" and parent's length is "1" + return old == "0.0.0.0/0" && new == "" && ok && v.(int) == 1 +} diff --git a/main.go b/main.go index d82346f09..5da0472d6 100644 --- a/main.go +++ b/main.go @@ -12,6 +12,7 @@ import ( ) //go:generate go test -tags userconfig ./internal/schemautil/userconfig +//go:generate go run ./ucgenerator/... --services cassandra,clickhouse,flink,grafana,influxdb,kafka,kafka_connect,kafka_mirrormaker,m3aggregator,m3db,mysql,opensearch,pg,redis // version is the version of the provider. 
var version = "dev" diff --git a/ucgenerator/main.go b/ucgenerator/main.go new file mode 100644 index 000000000..11934688d --- /dev/null +++ b/ucgenerator/main.go @@ -0,0 +1,406 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/aiven/go-api-schemas/pkg/dist" + "github.com/dave/jennifer/jen" + "golang.org/x/exp/slices" + "golang.org/x/tools/imports" + "gopkg.in/yaml.v3" +) + +const ( + destPath = "./internal/sdkprovider/userconfig/" + localPrefix = "github.com/aiven/terraform-provider-aiven" + importSchema = "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + importSchemaUtil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" + importValidation = "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + codeGenerated = "Code generated by user config generator. DO NOT EDIT." +) + +func main() { + var serviceList, integrationList string + flag.StringVar(&serviceList, "services", "", "Comma separated service list of names to generate for") + flag.StringVar(&integrationList, "integrations", "", "Comma separated integrations list of names to generate for") + flag.Parse() + + if serviceList+integrationList == "" { + log.Fatal("--service or --integrations must be provided") + } + + if serviceList != "" { + err := generate("service", dist.ServiceTypes, strings.Split(serviceList, ",")) + if err != nil { + log.Fatal(err) + } + } + + if integrationList != "" { + err := generate("integration", dist.IntegrationTypes, strings.Split(integrationList, ",")) + if err != nil { + log.Fatal(err) + } + } +} + +func generate(kind string, data []byte, keys []string) error { + slices.Sort(keys) + var root map[string]*object + + err := yaml.Unmarshal(data, &root) + if err != nil { + return err + } + + dirPath := filepath.Join(destPath, kind) + err = os.MkdirAll(dirPath, os.ModePerm) + if err != nil { + return err + } + + // Fixes imports order + imports.LocalPrefix = localPrefix + + 
doneKeys := make([]string, 0, len(keys)) + doneNames := make([]string, 0, len(keys)) + + for _, key := range keys { + o, ok := root[key] + if !ok { + return fmt.Errorf("key %q not found in spec", key) + } + + name := toCamelCase(key) + doneKeys = append(doneKeys, key) + doneNames = append(doneNames, name) + + o.isRoot = true + o.init(name) + if o.Description == "" { + o.Description = o.camelName + " user configurable settings" + } + + f := jen.NewFile(kind) + f.HeaderComment(codeGenerated) + f.ImportName(importSchema, "schema") + f.ImportName(importSchemaUtil, "schemautil") + genSchema(f, o) + + // Sorts imports + b, err := imports.Process("", []byte(f.GoString()), nil) + if err != nil { + return err + } + + // Saves file + err = os.WriteFile(filepath.Join(dirPath, key+".go"), b, 0644) + if err != nil { + return err + } + } + + cases := make([]jen.Code, 0, len(keys)+1) + for i, k := range doneKeys { + cases = append(cases, jen.Case(jen.Lit(k)).Block( + jen.Return(jen.Id("schema"+doneNames[i]).Call()), + )) + } + + // Panics if unknown kind requested + cases = append(cases, jen.Default().Block(jen.Panic(jen.Lit("unknown user config type: ").Op("+").Id("kind")))) + + f := jen.NewFile(kind) + f.HeaderComment(codeGenerated) + f.ImportName(importSchema, "schema") + f.Func().Id("GetUserConfig").Params(jen.Id("kind").String()).Op("*").Qual(importSchema, "Schema").Block( + jen.Switch(jen.Id("kind")).Block(cases...), + ) + + // Can't add import manually + // https://github.com/dave/jennifer/issues/20 + // This little workaround imports "strings" package + f.ImportName("strings", "strings") + f.Func().Id("strContains").Params(jen.Id("s"), jen.Id("substr").String()).Bool().Block( + jen.Return(jen.Qual("strings", "Contains").Call(jen.Id("s"), jen.Id("substr"))), + ) + f.Var().Id("reSetElement").Op("=").Qual("regexp", "MustCompile").Call(jen.Lit(`\.[0-9]{9}$`)) + f.Op(diffSuppressors) + return f.Save(filepath.Join(dirPath, kind+".go")) +} + +func genSchema(f *jen.File, o 
*object) { + f.Func().Id("schema"+o.camelName).Params().Op("*").Qual(importSchema, "Schema").Block( + jen.Return(jen.Op("&").Qual(importSchema, "Schema").Values(getSchemaValues(o))), + ) +} + +func getSchemaValues(o *object) jen.Dict { + values := make(jen.Dict) + + if d := getDescription(o); d != "" { + for old, n := range replaceDescriptionSubStrings { + d = strings.ReplaceAll(d, old, n) + } + values[jen.Id("Description")] = jen.Lit(d) + } + + var t string + switch o.Type { + case objectTypeObject, objectTypeArray: + if o.isSchemaless() { + // todo: handle schemaless if this happens + log.Fatalf("schemaless is not implement: %q", o.jsonName) + } + + t = "List" + if o.isArray() && o.ArrayItems.isScalar() { + // Stores scalars in the set type. + // Nested sets of objects do not work well in Terraform: + // - Changing a field shows diff for the whole object, + // because hash is calculated for the object, not per field. + // So no per-field updates, whole object replacement only. + // https://discuss.hashicorp.com/t/provider-schema-typeset-detect-changes/32546 + // - There is a bug that doesn't let you put a set deep inside ResourceData + // https://github.com/hashicorp/terraform-plugin-sdk/issues/459 + // - The diff itself is invalid for nested sets (not on the root level). + // It just doesn't work as expected in all cases. 
+ t = "Set" + } + + if o.MinItems != nil { + values[jen.Id("MinItems")] = jen.Lit(*o.MinItems) + } + if o.MaxItems != nil { + values[jen.Id("MaxItems")] = jen.Lit(*o.MaxItems) + } + case objectTypeBoolean: + t = "Bool" + case objectTypeString: + t = "String" + case objectTypeInteger: + t = "Int" + case objectTypeNumber: + t = "Float" + default: + log.Fatalf("unknown type %q for %q", o.Type, o.jsonName) + } + + values[jen.Id("Type")] = jen.Qual(importSchema, "Type"+t) + if o.IsDeprecated { + if o.DeprecationNotice == "" { + log.Fatalf("missing deprecation notice for %q", o.jsonName) + } + values[jen.Id("Deprecated")] = jen.Lit(o.DeprecationNotice) + } + + if o.CreateOnly { + values[jen.Id("ForceNew")] = jen.True() + } + + // Doesn't mark with required or optional scalar elements of arrays + if !(o.isScalar() && o.parent.isArray()) { + if o.Required { + values[jen.Id("Required")] = jen.True() + } else { + values[jen.Id("Optional")] = jen.True() + } + } + + if o.Default != nil && !o.Required { + switch { + case o.isArray() && o.ArrayItems.isScalar(): + switch o.jsonName { + case "ip_filter", "ip_filter_string": + values[jen.Id("DiffSuppressFunc")] = jen.Id("diffSuppressIpFilter") + } + case o.isScalar(): + values[jen.Id("Default")] = scalarLit(o, o.Default) + } + } + + if o.isScalar() { + if strings.Contains(o.jsonName, "api_key") || strings.Contains(o.jsonName, "password") { + values[jen.Id("Sensitive")] = jen.True() + } + + if o.Enum != nil { + args := make([]jen.Code, 0) + for _, v := range o.Enum { + args = append(args, scalarLit(o, v.Value)) + } + + // There are no other types functions. 
+ // Bool and number won't compile + switch o.Type { + case objectTypeString: + values[jen.Id("ValidateFunc")] = jen.Qual(importValidation, "StringInSlice").Call(scalarArrayLit(o, args), jen.False()) + case objectTypeInteger: + values[jen.Id("ValidateFunc")] = jen.Qual(importValidation, "IntInSlice").Call(scalarArrayLit(o, args)) + } + } + + return values + } + + if len(o.ConflictsWith) > 0 { + args := make([]jen.Code, len(o.ConflictsWith)) + for _, s := range o.ConflictsWith { + args = append(args, jen.Lit(s)) + } + values[jen.Id("ConflictsWith")] = jen.Index().String().Values(args...) + } + + if o.isArray() { + if o.ArrayItems.isScalar() { + fields := getSchemaValues(o.ArrayItems) + values[jen.Id("Elem")] = jen.Op("&").Qual(importSchema, "Schema").Values(fields) + return values + } + + // Renders the array as an object + o = o.ArrayItems + } + + if o.isRoot { + values[jen.Id("DiffSuppressFunc")] = jen.Id(`diffSuppressUnchanged`) + } + + fields := make(jen.Dict) + for _, p := range o.properties { + fields[jen.Lit(p.tfName)] = jen.Values(getSchemaValues(p)) + } + + values[jen.Id("Elem")] = jen.Op("&").Qual(importSchema, "Resource").Values(jen.Dict{ + jen.Id("Schema"): jen.Map(jen.String()).Op("*").Qual(importSchema, "Schema").Values(fields), + }) + + return values +} + +func getDescription(o *object) string { + desc := make([]string, 0) + d := o.Description + if len(d) < len(o.Title) { + d = o.Title + } + + // Comes from the schema, quite confusing + d = strings.TrimSuffix(d, "The default value is `map[]`.") + if d != "" { + desc = append(desc, addDot(d)) + } + + if o.isScalar() && o.Default != nil { + desc = append(desc, fmt.Sprintf("The default value is `%v`.", o.Default)) + } + + // Trims dot from description, so it doesn't look weird with link to nested schema + // Example: Databases to expose[dot] (see [below for nested schema]...) 
+ if len(desc) == 1 && o.isNestedBlock() { + return strings.Trim(desc[0], ".") + } + + return strings.Join(desc, " ") +} + +func addDot(s string) string { + if s != "" { + switch s[len(s)-1:] { + case ".", "!", "?": + default: + s += "." + } + } + return s +} + +var replaceDescriptionSubStrings = map[string]string{ + "DEPRECATED: ": "", + "This setting is deprecated. ": "", + "[seconds]": "(seconds)", +} + +func scalarLit(o *object, value any) *jen.Statement { + switch o.Type { + case objectTypeString: + return jen.Lit(value.(string)) + case objectTypeBoolean: + return jen.Lit(value.(bool)) + case objectTypeInteger: + n, _ := strconv.Atoi(value.(string)) + return jen.Lit(n) + case objectTypeNumber: + return jen.Lit(value.(float64)) + } + log.Fatalf("unknown scalar %v", o) + return nil +} + +func scalarArrayLit(o *object, args []jen.Code) *jen.Statement { + switch o.Type { + case objectTypeString: + return jen.Index().String().Values(args...) + case objectTypeInteger: + return jen.Index().Int().Values(args...) + } + log.Fatalf("unexpected element type of array for default value: %q", o.Type) + return nil +} + +const diffSuppressors = ` +func diffSuppressUnchanged(k, old, new string, d *schema.ResourceData) bool { + // Lists, sets and objects (object is list with one item). + if k[len(k)-1:] == "#" { + if d.HasChange(k) { + // By some reason terraform might mark objects as "changed". + // In that case we perform this check manually. + // "nil" means it doesn't have changed fields + key := strings.TrimSuffix(k, ".#") + v, ok := d.Get(key).([]any) + return ok && len(v) == 1 && v[0] == nil + } + return true + } + + // Ip filter items handled with a special suppressor. + if strContains(k, ".ip_filter.") || strContains(k, ".ip_filter_string.") { + return diffSuppressIpFilter(k, old, new, d) + } + + // Doesn't suppress "set" items. + // Set item ends with a 9 length hash int. + if reSetElement.MatchString(k) { + return false + } + + // Object properties. 
+ // "old" — is something read from API + // "new" — is what read from tf file + // If value is computed it has non-empty old (any value) and empty "new"". + switch new { + case "", "0", "false": + // "default" value, that is removed: + // "" — kafka_version = "3.5" -> "" + // 0 — backup_hour = "4" -> 0 + // false — allow_sign_up = true -> false + return !d.HasChange(k) + } + return false +} + +func diffSuppressIpFilter(k, old, new string, d *schema.ResourceData) bool { + // Suppresses ip_filter = [0.0.0.0/0] + path := strings.Split(k, ".") + // Turns ~ip_filter.1234 to ~ip_filter.# + v, ok := d.GetOk(strings.Join(path[:len(path)-1], ".") + ".#") + // Literally, if value is "0.0.0.0/0" and parent's length is "1" + return old == "0.0.0.0/0" && new == "" && ok && v.(int) == 1 +} +` diff --git a/ucgenerator/models.go b/ucgenerator/models.go new file mode 100644 index 000000000..1c4ccd67a --- /dev/null +++ b/ucgenerator/models.go @@ -0,0 +1,283 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/stoewer/go-strcase" + "golang.org/x/exp/slices" +) + +type objectType string + +const ( + objectTypeObject objectType = "object" + objectTypeArray objectType = "array" + objectTypeString objectType = "string" + objectTypeBoolean objectType = "boolean" + objectTypeInteger objectType = "integer" + objectTypeNumber objectType = "number" +) + +type object struct { + isRoot bool // top level object + jsonName string // original name from json spec + tfName string // terraform manifest field, unlike jsonName, can't store dot symbol + tfoStructName string + dtoStructName string + camelName string + varName string + attrsName string + properties []*object + parent *object + + Type objectType `yaml:"-"` + Required bool `yaml:"-"` + + IsDeprecated bool `yaml:"is_deprecated"` + DeprecationNotice string `yaml:"deprecation_notice"` + Default any `yaml:"default"` + Enum []*struct { + Value string `yaml:"value"` + IsDeprecated bool `yaml:"is_deprecated"` 
+ } `yaml:"enum"` + Pattern string `yaml:"pattern"` + MinItems *int `yaml:"min_items"` + MaxItems *int `yaml:"max_items"` + MinLength *int `yaml:"min_length"` + MaxLength *int `yaml:"max_length"` + Minimum *float64 `yaml:"minimum"` + Maximum *float64 `yaml:"maximum"` + OrigType any `yaml:"type"` + Format string `yaml:"format"` + Title string `yaml:"title"` + Description string `yaml:"description"` + Properties map[string]*object `yaml:"properties"` + ArrayItems *object `yaml:"items"` + OneOf []*object `yaml:"one_of"` + RequiredFields []string `yaml:"required"` + CreateOnly bool `yaml:"create_only"` + Nullable bool `yaml:"-"` + ConflictsWith []string `yaml:"-"` +} + +func (o *object) init(name string) { + unwrapArrayMultipleTypes(o) + + // Sorts properties, so they keep order on each generation + keys := make([]string, 0, len(o.Properties)) + for k := range o.Properties { + keys = append(keys, k) + } + + slices.Sort(keys) + for _, k := range keys { + o.properties = append(o.properties, o.Properties[k]) + } + + required := make(map[string]bool, len(o.RequiredFields)) + for _, k := range o.RequiredFields { + required[k] = true + } + + for _, k := range keys { + child := o.Properties[k] + child.parent = o + child.Required = required[k] + child.init(k) + } + + // Types can be list of strings, or a string + if v, ok := o.OrigType.(string); ok { + o.Type = objectType(v) + } else if v, ok := o.OrigType.([]interface{}); ok { + types := 0 + for _, t := range v { + switch s := t.(string); s { + case "null": + o.Nullable = true + default: + o.Type = objectType(s) + types++ + if types > 1 { + log.Fatalf("%q has multiple types", name) + } + } + } + } + + if o.isArray() { + o.ArrayItems.parent = o + o.ArrayItems.init(name) + } + + // In terraform objects are lists of one item. + // So we need to add a constraint + if o.isObject() { + one := 1 + o.MaxItems = &one + o.Default = nil + } + + if o.isArray() && o.ArrayItems.isObject() { + // In terraform object is a list of one object. 
+ // So a real list with one object is the same. + // We need to see the difference for convert values to API + if o.MaxItems != nil && *o.MaxItems == 1 { + // As a fix, set nil to MaxItems + log.Fatalf("%q array with object element and MaxItems==1", name) + } + } + + // A fix that removes empty string default value + if o.Type == objectTypeString && o.Default != nil && o.Default.(string) == "" { + o.Default = nil + } + + o.camelName = toCamelCase(name) + low := toLowerFirst(o.camelName) + o.varName = low + "Var" + o.attrsName = low + "Attrs" + o.tfoStructName = "tfo" + o.camelName + o.dtoStructName = "dto" + o.camelName + o.jsonName = name + o.tfName = strings.ReplaceAll(name, ".", "__dot__") +} + +func (o *object) isNestedBlock() bool { + switch o.Type { + case objectTypeObject: + return len(o.Properties) > 0 + case objectTypeArray: + return o.ArrayItems.isObject() || o.ArrayItems.isArray() + } + return false +} + +func (o *object) isObject() bool { + return o.Type == objectTypeObject +} + +func (o *object) isComputed() bool { + if o.isRoot || !o.isObject() || o.Required || o.Default != nil { + return false + } + for _, p := range o.Properties { + if p.Required { + return false + } + } + return true +} + +func (o *object) isSchemaless() bool { + return o.isObject() && len(o.Properties) == 0 +} + +func (o *object) isArray() bool { + return o.Type == objectTypeArray +} + +func (o *object) isScalar() bool { + return !(o.isObject() || o.isArray()) +} + +func (o *object) ListProperties() []*object { + if o.isArray() { + return o.ArrayItems.properties + } + return o.properties +} + +// toCamelCase some fields have dots within, makes cleaner camelCase +func toCamelCase(s string) string { + return strcase.UpperCamelCase(strings.ReplaceAll(s, ".", "_")) +} + +func toLowerFirst(s string) string { + return strings.ToLower(s[0:1]) + s[1:] +} + +func deepcopy(o *object) *object { + clone := new(object) + b, _ := json.Marshal(o) + _ = json.Unmarshal(b, clone) + return clone +} 
+ +const deprecationNotice = "Deprecated. Use `%s` instead." + +// unwrapArrayMultipleTypes automatically unwraps multiple types (type as list or oneOf) for arrays. +// A "foo" field with types "string" and "object" unwrapped to three fields: foo, foo_string, foo_object +// First seen type becomes the default one and marked as deprecated. +// Because that's what happens to multi-typed fields: +// first they have one type, then new added, we split them into separate fields in terraform +// and deprecate the original field. +func unwrapArrayMultipleTypes(o *object) { + for key, p := range o.Properties { + // So far, array types unwrapped only + if p.ArrayItems == nil { + continue + } + + prefix := key + "_" + fields := make(map[string]*object) + + // Unwraps multiple _type names_, e.g. [string, object] + types, ok := p.ArrayItems.OrigType.([]interface{}) + if ok { + strTypes := make([]string, 0) + for _, t := range types { + if s := t.(string); s != "null" { + strTypes = append(strTypes, s) + } + } + + if len(strTypes) == 1 { + continue + } + + // Multiple types. + // This ArrayItems object is composite: + // it has properties for the object type, and MaxLength for the string type. + // So it just copies it and sets type explicitly. + for _, s := range strTypes { + clone := deepcopy(p) + clone.ArrayItems.OrigType = s + fields[prefix+s] = clone + } + + p.IsDeprecated = true + p.DeprecationNotice = fmt.Sprintf(deprecationNotice, prefix+strTypes[0]) + p.ArrayItems.OrigType = strTypes[0] + fields[key] = p + + } else if len(p.ArrayItems.OneOf) != 0 { + // Unwraps multiple _type objects_, e.g. [{type:string}, {type: object}] + for i := range p.ArrayItems.OneOf { + t := p.ArrayItems.OneOf[i] + clone := deepcopy(p) + clone.ArrayItems = t + clone.Description = fmt.Sprintf("%s %s", addDot(p.Description), t.Description) + fields[prefix+t.OrigType.(string)] = clone + } + + // First seen type in priority. 
Replaces the original object + priorityType := prefix + p.ArrayItems.OneOf[0].OrigType.(string) + orig := deepcopy(fields[priorityType]) + orig.DeprecationNotice = fmt.Sprintf(deprecationNotice, priorityType) + orig.IsDeprecated = true + fields[key] = orig + } + + for k, c := range fields { + //for conflict := range fields { + // c.ConflictsWith = append(c.ConflictsWith, conflict) + //} + //slices.Sort(o.ConflictsWith) + o.Properties[k] = c + } + } +}