From e49fc633d9788119cdb3e2a49f38d98647d302e1 Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Wed, 4 Oct 2023 14:05:50 +0300 Subject: [PATCH 01/27] feat(plugin): add new user config generator (#1342) --- CHANGELOG.md | 5 + Makefile | 5 +- docs/data-sources/service_integration.md | 226 +++---- docs/resources/service_integration.md | 53 +- go.mod | 6 +- go.sum | 11 +- internal/plugin/provider.go | 3 + .../service/serviceintegration/models.go | 106 ++++ .../service_integration_data_source.go | 140 ++++ .../service_integration_resource.go | 340 ++++++++++ .../service/serviceintegration/userconfig.go | 136 ++++ .../clickhousekafka/clickhouse_kafka.go | 408 ++++++++++++ .../clickhousekafka/clickhouse_kafka_test.go | 122 ++++ .../clickhouse_postgresql.go | 145 +++++ .../clickhouse_postgresql_test.go | 82 +++ .../userconfig/integration/datadog/datadog.go | 460 ++++++++++++++ .../integration/datadog/datadog_test.go | 132 ++++ .../external_aws_cloudwatch_metrics.go | 224 +++++++ .../external_aws_cloudwatch_metrics_test.go | 94 +++ .../integration/kafkaconnect/kafka_connect.go | 168 +++++ .../kafkaconnect/kafka_connect_test.go | 82 +++ .../integration/kafkalogs/kafka_logs.go | 114 ++++ .../integration/kafkalogs/kafka_logs_test.go | 78 +++ .../kafkamirrormaker/kafka_mirrormaker.go | 220 +++++++ .../kafka_mirrormaker_test.go | 88 +++ .../userconfig/integration/logs/logs.go | 133 ++++ .../userconfig/integration/logs/logs_test.go | 80 +++ .../userconfig/integration/metrics/metrics.go | 414 ++++++++++++ .../integration/metrics/metrics_test.go | 114 ++++ internal/plugin/util/schema.go | 16 +- internal/plugin/util/wait.go | 20 + internal/schemautil/plugin.go | 153 +++++ .../dist/integration_endpoint_types.go | 3 +- .../userconfig/dist/integration_types.go | 3 +- .../userconfig/dist/service_types.go | 3 +- internal/sdkprovider/provider/provider.go | 2 - .../service/kafkatopic/kafka_topic_cache.go | 2 - .../service_integration_test.go | 2 +- main.go | 1 + ucgenerator/main.go | 597 ++++++++++++++++++ ucgenerator/models.go | 142 +++++ ucgenerator/tests.go | 151 +++++ 42 files changed, 5124 insertions(+), 160 deletions(-) create mode 100644 internal/plugin/service/serviceintegration/models.go create mode 100644 internal/plugin/service/serviceintegration/service_integration_data_source.go create mode 100644 internal/plugin/service/serviceintegration/service_integration_resource.go create mode 100644 internal/plugin/service/serviceintegration/userconfig.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go create mode 100644 internal/plugin/service/userconfig/integration/datadog/datadog.go create mode 100644 internal/plugin/service/userconfig/integration/datadog/datadog_test.go create mode 100644 internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go create mode 100644 internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go create mode 100644 
internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go create mode 100644 internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go create mode 100644 internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go create mode 100644 internal/plugin/service/userconfig/integration/logs/logs.go create mode 100644 internal/plugin/service/userconfig/integration/logs/logs_test.go create mode 100644 internal/plugin/service/userconfig/integration/metrics/metrics.go create mode 100644 internal/plugin/service/userconfig/integration/metrics/metrics_test.go create mode 100644 internal/plugin/util/wait.go create mode 100644 internal/schemautil/plugin.go create mode 100644 ucgenerator/main.go create mode 100644 ucgenerator/models.go create mode 100644 ucgenerator/tests.go diff --git a/CHANGELOG.md b/CHANGELOG.md index f961f2ec5..6e2ba04eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,11 @@ nav_order: 1 # Changelog +## [5.0.0] - YYYY-MM-DD + +- Migrate `aiven_service_integration` to the Plugin Framework + + ## [MAJOR.MINOR.PATCH] - YYYY-MM-DD ## [4.9.1] - 2023-10-03 diff --git a/Makefile b/Makefile index 45d16dee8..b2c99cb77 100644 --- a/Makefile +++ b/Makefile @@ -118,8 +118,9 @@ fmt-test: $(TERRAFMT) $(TERRAFMT) fmt ./internal -fv # On macOS this requires gnu-sed. Run `brew info gnu-sed` and follow the instructions to replace the default sed. +# The negative lookbehind skips the "= `" pattern so that go templates used for code generation are not affected imports: - find . -type f -name '*.go' -exec sed -zi 's/"\n\+\t"/"\n"/g' {} + + find . -type f -name '*.go' -exec sed -zi 's/(?<== `\s+)"\n\+\t"/"\n"/g' {} + goimports -local "github.com/aiven/terraform-provider-aiven" -w . ################################################# @@ -150,4 +151,4 @@ go-generate: go generate ./... 
-generate: go-generate docs +generate: go-generate imports docs diff --git a/docs/data-sources/service_integration.md b/docs/data-sources/service_integration.md index 354e8152b..f3f35d1e6 100644 --- a/docs/data-sources/service_integration.md +++ b/docs/data-sources/service_integration.md @@ -30,248 +30,248 @@ data "aiven_service_integration" "myintegration" { ### Read-Only -- `clickhouse_kafka_user_config` (List of Object) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedatt--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (List of Object) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedatt--clickhouse_postgresql_user_config)) -- `datadog_user_config` (List of Object) Datadog user configurable settings (see [below for nested schema](#nestedatt--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (List of Object) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedatt--external_aws_cloudwatch_metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) - `id` (String) The ID of this resource. 
- `integration_id` (String) Service Integration Id at aiven -- `kafka_connect_user_config` (List of Object) KafkaConnect user configurable settings (see [below for nested schema](#nestedatt--kafka_connect_user_config)) -- `kafka_logs_user_config` (List of Object) KafkaLogs user configurable settings (see [below for nested schema](#nestedatt--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (List of Object) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedatt--kafka_mirrormaker_user_config)) -- `logs_user_config` (List of Object) Logs user configurable settings (see [below for nested schema](#nestedatt--logs_user_config)) -- `metrics_user_config` (List of Object) Metrics user configurable settings (see [below for nested schema](#nestedatt--metrics_user_config)) +- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - + ### Nested Schema for `clickhouse_kafka_user_config` Read-Only: -- `tables` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables)) +- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) - + ### Nested Schema for `clickhouse_kafka_user_config.tables` Read-Only: -- `auto_offset_reset` (String) -- `columns` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--columns)) -- `data_format` (String) -- `date_time_input_format` (String) -- `group_name` (String) -- `handle_error_mode` (String) -- `max_block_size` (Number) -- `max_rows_per_message` (Number) -- `name` (String) -- `num_consumers` (Number) -- `poll_max_batch_size` (Number) -- `skip_broken_messages` (Number) -- `topics` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--topics)) - - +- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. +- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `data_format` (String) Message data format. The default value is `JSONEachRow`. +- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. +- `group_name` (String) Kafka consumers group. The default value is `clickhouse`. +- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. +- `max_block_size` (Number) Number of rows collected by poll(s) for flushing data from Kafka. The default value is `0`. +- `max_rows_per_message` (Number) The maximum number of rows produced in one Kafka message for row-based formats. The default value is `1`. +- `name` (String) Name of the table. +- `num_consumers` (Number) The number of consumers per table per replica. 
The default value is `1`. +- `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. +- `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. +- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) + + ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` Read-Only: -- `name` (String) -- `type` (String) +- `name` (String) Column name. +- `type` (String) Column type. - + ### Nested Schema for `clickhouse_kafka_user_config.tables.topics` Read-Only: -- `name` (String) +- `name` (String) Name of the topic. - + ### Nested Schema for `clickhouse_postgresql_user_config` Read-Only: -- `databases` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_postgresql_user_config--databases)) +- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) - + ### Nested Schema for `clickhouse_postgresql_user_config.databases` Read-Only: -- `database` (String) -- `schema` (String) +- `database` (String) PostgreSQL database to expose. The default value is `defaultdb`. +- `schema` (String) PostgreSQL schema to expose. The default value is `public`. - + ### Nested Schema for `datadog_user_config` Read-Only: -- `datadog_dbm_enabled` (Boolean) -- `datadog_tags` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) -- `exclude_topics` (List of String) -- `include_consumer_groups` (List of String) -- `include_topics` (List of String) -- `kafka_custom_metrics` (List of String) -- `max_jmx_metrics` (Number) -- `opensearch` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--opensearch)) -- `redis` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--redis)) - - +- `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. +- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (List of String) List of consumer groups to exclude. +- `exclude_topics` (List of String) List of topics to exclude. +- `include_consumer_groups` (List of String) List of consumer groups to include. +- `include_topics` (List of String) List of topics to include. +- `kafka_custom_metrics` (List of String) List of custom metrics. +- `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. +- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) + + ### Nested Schema for `datadog_user_config.datadog_tags` Read-Only: -- `comment` (String) -- `tag` (String) +- `comment` (String) Optional tag explanation. +- `tag` (String) Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven. - + ### Nested Schema for `datadog_user_config.opensearch` Read-Only: -- `index_stats_enabled` (Boolean) -- `pending_task_stats_enabled` (Boolean) -- `pshard_stats_enabled` (Boolean) +- `index_stats_enabled` (Boolean) Enable Datadog Opensearch Index Monitoring. 
+- `pending_task_stats_enabled` (Boolean) Enable Datadog Opensearch Pending Task Monitoring. +- `pshard_stats_enabled` (Boolean) Enable Datadog Opensearch Primary Shard Monitoring. - + ### Nested Schema for `datadog_user_config.redis` Read-Only: -- `command_stats_enabled` (Boolean) +- `command_stats_enabled` (Boolean) Enable command_stats option in the agent's configuration. The default value is `false`. - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config` Read-Only: -- `dropped_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` Read-Only: -- `field` (String) -- `metric` (String) +- `field` (String) Identifier of a value in the metric. +- `metric` (String) Identifier of the metric. - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.extra_metrics` Read-Only: -- `field` (String) -- `metric` (String) +- `field` (String) Identifier of a value in the metric. +- `metric` (String) Identifier of the metric. - + ### Nested Schema for `kafka_connect_user_config` Read-Only: -- `kafka_connect` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) - + ### Nested Schema for `kafka_connect_user_config.kafka_connect` Read-Only: -- `config_storage_topic` (String) -- `group_id` (String) -- `offset_storage_topic` (String) -- `status_storage_topic` (String) +- `config_storage_topic` (String) The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id. +- `group_id` (String) A unique string that identifies the Connect cluster group this worker belongs to. +- `offset_storage_topic` (String) The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id. +- `status_storage_topic` (String) The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id. - + ### Nested Schema for `kafka_logs_user_config` Read-Only: -- `kafka_topic` (String) -- `selected_log_fields` (List of String) +- `kafka_topic` (String) Topic name. +- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. 
- + ### Nested Schema for `kafka_mirrormaker_user_config` Read-Only: -- `cluster_alias` (String) -- `kafka_mirrormaker` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. +- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) - + ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` Read-Only: -- `consumer_fetch_min_bytes` (Number) -- `producer_batch_size` (Number) -- `producer_buffer_memory` (Number) -- `producer_compression_type` (String) -- `producer_linger_ms` (Number) -- `producer_max_request_size` (Number) +- `consumer_fetch_min_bytes` (Number) The minimum amount of data the server should return for a fetch request. +- `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker. +- `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker. +- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression. +- `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing. +- `producer_max_request_size` (Number) The maximum request size in bytes. - + ### Nested Schema for `logs_user_config` Read-Only: -- `elasticsearch_index_days_max` (Number) -- `elasticsearch_index_prefix` (String) -- `selected_log_fields` (List of String) +- `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. +- `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. +- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. - + ### Nested Schema for `metrics_user_config` Read-Only: -- `database` (String) -- `retention_days` (Number) -- `ro_username` (String) -- `source_mysql` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql)) -- `username` (String) +- `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. +- `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. +- `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. +- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `username` (String) Name of the user used to write metrics. 
Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - + ### Nested Schema for `metrics_user_config.source_mysql` Read-Only: -- `telegraf` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) - + ### Nested Schema for `metrics_user_config.source_mysql.telegraf` Read-Only: -- `gather_event_waits` (Boolean) -- `gather_file_events_stats` (Boolean) -- `gather_index_io_waits` (Boolean) -- `gather_info_schema_auto_inc` (Boolean) -- `gather_innodb_metrics` (Boolean) -- `gather_perf_events_statements` (Boolean) -- `gather_process_list` (Boolean) -- `gather_slave_status` (Boolean) -- `gather_table_io_waits` (Boolean) -- `gather_table_lock_waits` (Boolean) -- `gather_table_schema` (Boolean) -- `perf_events_statements_digest_text_limit` (Number) -- `perf_events_statements_limit` (Number) -- `perf_events_statements_time_limit` (Number) +- `gather_event_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS. +- `gather_file_events_stats` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME. +- `gather_index_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE. +- `gather_info_schema_auto_inc` (Boolean) Gather auto_increment columns and max values from information schema. +- `gather_innodb_metrics` (Boolean) Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS. +- `gather_perf_events_statements` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST. +- `gather_process_list` (Boolean) Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST. +- `gather_slave_status` (Boolean) Gather metrics from SHOW SLAVE STATUS command output. +- `gather_table_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE. +- `gather_table_lock_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS. +- `gather_table_schema` (Boolean) Gather metrics from INFORMATION_SCHEMA.TABLES. +- `perf_events_statements_digest_text_limit` (Number) Truncates digest text from perf_events_statements into this many characters. +- `perf_events_statements_limit` (Number) Limits metrics from perf_events_statements. +- `perf_events_statements_time_limit` (Number) Only include perf_events_statements whose last seen is less than this many seconds. 
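A minimal sketch of reading the regenerated data source from Terraform, since every `*_user_config` above is now a block list addressed by index; the project and service names are placeholders rather than part of this change:

```hcl
data "aiven_service_integration" "myintegration" {
  project                  = "my-project" # placeholder
  integration_type         = "metrics"
  source_service_name      = "my-kafka"   # placeholder
  destination_service_name = "my-pg"      # placeholder
}

output "metrics_retention_days" {
  # user configs are block lists, hence the [0] index
  value = data.aiven_service_integration.myintegration.metrics_user_config[0].retention_days
}
```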
diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index e8bef4aed..9218f0576 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -33,17 +33,17 @@ resource "aiven_service_integration" "my_integration_metrics" { ### Optional -- `clickhouse_kafka_user_config` (Block List, Max: 1) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List, Max: 1) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List, Max: 1) Datadog user configurable settings (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) - `destination_service_name` (String) Destination service for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List, Max: 1) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) -- `kafka_connect_user_config` (Block List, Max: 1) KafkaConnect user configurable settings (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List, Max: 1) KafkaLogs user configurable settings (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List, Max: 1) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List, Max: 1) Logs user configurable settings (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List, Max: 1) Metrics user configurable settings (see [below for nested schema](#nestedblock--metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - `source_service_name` (String) Source service for the integration (if any) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) @@ -58,7 +58,7 @@ resource "aiven_service_integration" 
"my_integration_metrics" { Optional: -- `tables` (Block List, Max: 100) Tables to create. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -72,7 +72,7 @@ Required: Optional: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List, Max: 100) Table columns. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`. @@ -80,7 +80,7 @@ Optional: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. -- `topics` (Block List, Max: 100) Kafka topics. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -106,7 +106,7 @@ Required: Optional: -- `databases` (Block List, Max: 10) Databases to expose. (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -124,15 +124,15 @@ Optional: Optional: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List, Max: 32) Custom tags provided by user. (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) - `exclude_consumer_groups` (List of String) List of custom metrics. - `exclude_topics` (List of String) List of topics to exclude. - `include_consumer_groups` (List of String) List of custom metrics. - `include_topics` (List of String) List of topics to include. - `kafka_custom_metrics` (List of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. -- `opensearch` (Block List, Max: 1) Datadog Opensearch Options. (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List, Max: 1) Datadog Redis Options. 
(see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -170,8 +170,8 @@ Optional: Optional: -- `dropped_metrics` (Block List, Max: 1024) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List, Max: 1024) Metrics to allow through to AWS CloudWatch (in addition to default metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -197,7 +197,7 @@ Required: Optional: -- `kafka_connect` (Block List, Max: 1) Kafka Connect service configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -229,7 +229,7 @@ Optional: Optional: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -263,7 +263,7 @@ Optional: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List, Max: 1) Configuration options for metrics where source service is MySQL. 
(see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -271,7 +271,7 @@ Optional: Optional: -- `telegraf` (Block List, Max: 1) Configuration options for Telegraf MySQL input plugin. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` @@ -301,11 +301,10 @@ Optional: Optional: -- `create` (String) -- `default` (String) -- `delete` (String) -- `read` (String) -- `update` (String) +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
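The `timeouts` descriptions above are the plugin framework's standard duration strings; a minimal sketch of setting them on the resource, with illustrative values and placeholder names:

```hcl
resource "aiven_service_integration" "example" {
  project                  = "my-project" # placeholder
  integration_type         = "metrics"
  source_service_name      = "my-kafka"   # placeholder
  destination_service_name = "my-pg"      # placeholder

  timeouts {
    create = "10m"
    read   = "2m"
    update = "5m"
    delete = "5m"
  }
}
```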
## Import Import is supported using the following syntax: ```shell diff --git a/go.mod b/go.mod index ede4fd69d..fab4215a2 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.21.1 require ( github.com/aiven/aiven-go-client/v2 v2.1.0 + github.com/avast/retry-go v3.0.0+incompatible github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 @@ -15,9 +16,12 @@ require ( github.com/hashicorp/terraform-plugin-mux v0.12.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/liip/sheriff v0.11.1 + github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.8.4 golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 golang.org/x/sync v0.3.0 + golang.org/x/tools v0.6.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -72,7 +76,7 @@ require ( github.com/hashicorp/terraform-registry-address v0.2.2 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect - github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect + github.com/jinzhu/copier v0.4.0 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.11 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/go.sum b/go.sum index e2755f8ee..78c4d5e40 100644 --- a/go.sum +++ b/go.sum @@ -212,6 +212,8 @@ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJE github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= @@ -404,6 +406,7 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -447,8 +450,8 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= -github.com/jinzhu/copier 
v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -472,6 +475,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liip/sheriff v0.11.1 h1:52YGzskXFPSEnwfEtXnbPiMKKXJGm5IP45s8Ogw0Wyk= +github.com/liip/sheriff v0.11.1/go.mod h1:nVTQYHxfdIfOHnk5FREt4j6cnaSlJPUfXFVORfgGmTo= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -522,6 +527,8 @@ github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJC github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/internal/plugin/provider.go b/internal/plugin/provider.go index 435879e7f..9a64a061d 100644 --- a/internal/plugin/provider.go +++ b/internal/plugin/provider.go @@ -14,6 +14,7 @@ import ( "github.com/aiven/terraform-provider-aiven/internal/common" "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" "github.com/aiven/terraform-provider-aiven/internal/plugin/service/organization" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/serviceintegration" ) // AivenProvider is the provider implementation for Aiven. 
@@ -110,6 +111,7 @@ func (p *AivenProvider) Configure( func (p *AivenProvider) Resources(context.Context) []func() resource.Resource { return []func() resource.Resource{ organization.NewOrganizationResource, + serviceintegration.NewServiceIntegrationResource, } } @@ -117,6 +119,7 @@ func (p *AivenProvider) Resources(context.Context) []func() resource.Resource { func (p *AivenProvider) DataSources(context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ organization.NewOrganizationDataSource, + serviceintegration.NewServiceIntegrationDataSource, } } diff --git a/internal/plugin/service/serviceintegration/models.go b/internal/plugin/service/serviceintegration/models.go new file mode 100644 index 000000000..c499147c6 --- /dev/null +++ b/internal/plugin/service/serviceintegration/models.go @@ -0,0 +1,106 @@ +package serviceintegration + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const ( + idProjectIndex = 0 + idIntegrationIDIndex = 1 +) + +// Plugin framework doesn't support embedded structs +// https://github.com/hashicorp/terraform-plugin-framework/issues/242 +// We use resourceModel as base model, and copy state to/from dataSourceModel for datasource +type resourceModel struct { + Timeouts timeouts.Value `tfsdk:"timeouts"` + ID types.String `tfsdk:"id" copier:"ID"` + Project types.String `tfsdk:"project" copier:"Project"` + IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"` + DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"` + DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"` + IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` + SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` + SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` + ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` +} + +type dataSourceModel struct { + ID types.String `tfsdk:"id" copier:"ID"` + Project types.String `tfsdk:"project" copier:"Project"` + IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"` + DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"` + DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"` + IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` + SourceEndpointID types.String 
`tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` + SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` + ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` +} + +func (p *resourceModel) getID() string { + i := p.IntegrationID.ValueString() + if i != "" { + return i + } + return getIDIndex(p.ID.ValueString(), idIntegrationIDIndex) +} + +func (p *resourceModel) getProject() string { + project := p.Project.ValueString() + if project != "" { + return project + } + return getIDIndex(p.ID.ValueString(), idProjectIndex) +} + +func getIDIndex(s string, i int) string { + list := strings.Split(s, "/") + if i < len(list) { + return list[i] + } + return "" +} + +func getEndpointIDPointer(s string) *string { + id := getIDIndex(s, idIntegrationIDIndex) + if s == "" { + return nil + } + return &id +} + +func getProjectPointer(s string) *string { + id := getIDIndex(s, idProjectIndex) + if s == "" { + return nil + } + return &id +} + +func newEndpointID(project string, s *string) types.String { + if s != nil { + v := fmt.Sprintf("%s/%s", project, *s) + s = &v + } + return types.StringPointerValue(s) +} diff --git a/internal/plugin/service/serviceintegration/service_integration_data_source.go b/internal/plugin/service/serviceintegration/service_integration_data_source.go new file mode 100644 index 000000000..12e624d6a --- /dev/null +++ b/internal/plugin/service/serviceintegration/service_integration_data_source.go @@ -0,0 +1,140 @@ +package serviceintegration + +import ( + "context" + + "github.com/aiven/aiven-go-client/v2" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/jinzhu/copier" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + 
"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +var ( + _ datasource.DataSource = &serviceIntegrationDataSource{} + _ datasource.DataSourceWithConfigure = &serviceIntegrationDataSource{} +) + +func NewServiceIntegrationDataSource() datasource.DataSource { + return &serviceIntegrationDataSource{} +} + +type serviceIntegrationDataSource struct { + client *aiven.Client +} + +func (s *serviceIntegrationDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, _ *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + s.client = req.ProviderData.(*aiven.Client) +} + +func (s *serviceIntegrationDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "aiven_service_integration" +} + +func (s *serviceIntegrationDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "The Service Integration data source provides information about the existing Aiven Service Integration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "integration_id": schema.StringAttribute{ + Description: "Service Integration Id at aiven", + Computed: true, + }, + "destination_endpoint_id": schema.StringAttribute{ + Description: "Destination endpoint for the integration (if any)", + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "destination_service_name": schema.StringAttribute{ + Description: "Destination service for the integration (if any)", + Required: true, + }, + "integration_type": schema.StringAttribute{ + Description: "Type of the service integration. 
Possible values: " + schemautil.JoinQuoted(integrationTypes(), ", ", "`"), + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(integrationTypes()...), + }, + }, + "project": schema.StringAttribute{ + Description: "Project the integration belongs to", + Required: true, + }, + "source_endpoint_id": schema.StringAttribute{ + Description: "Source endpoint for the integration (if any)", + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "source_service_name": schema.StringAttribute{ + Description: "Source service for the integration (if any)", + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "clickhouse_kafka_user_config": clickhousekafka.NewDataSourceSchema(), + "clickhouse_postgresql_user_config": clickhousepostgresql.NewDataSourceSchema(), + "datadog_user_config": datadog.NewDataSourceSchema(), + "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewDataSourceSchema(), + "kafka_connect_user_config": kafkaconnect.NewDataSourceSchema(), + "kafka_logs_user_config": kafkalogs.NewDataSourceSchema(), + "kafka_mirrormaker_user_config": kafkamirrormaker.NewDataSourceSchema(), + "logs_user_config": logs.NewDataSourceSchema(), + "metrics_user_config": metrics.NewDataSourceSchema(), + }, + } +} + +// Read reads datasource +// All functions adapted for resourceModel, so we use it as donor +// Copies state from datasource to resource, then back, when things are done +func (s *serviceIntegrationDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var o dataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &o)...) + if resp.Diagnostics.HasError() { + return + } + + var res resourceModel + err := copier.Copy(&res, &o) + if err != nil { + resp.Diagnostics.AddError("data config copy error", err.Error()) + } + + dto, err := getSIByName(ctx, s.client, &res) + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorReadingResource, err.Error()) + return + } + + loadFromDTO(ctx, &resp.Diagnostics, &res, dto) + if resp.Diagnostics.HasError() { + return + } + + err = copier.Copy(&o, &res) + if err != nil { + resp.Diagnostics.AddError("dto copy error", err.Error()) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, o)...) 
+} diff --git a/internal/plugin/service/serviceintegration/service_integration_resource.go b/internal/plugin/service/serviceintegration/service_integration_resource.go new file mode 100644 index 000000000..60a737bd3 --- /dev/null +++ b/internal/plugin/service/serviceintegration/service_integration_resource.go @@ -0,0 +1,340 @@ +package serviceintegration + +import ( + "context" + "fmt" + "regexp" + + "github.com/aiven/aiven-go-client/v2" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/util" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +var endpointIDValidator = stringvalidator.RegexMatches( + regexp.MustCompile(`^[a-zA-Z0-9_-]*/[a-zA-Z0-9_-]*$`), + "endpoint id should have the following format: project_name/endpoint_id", +) + +var ( + _ resource.Resource = &serviceIntegrationResource{} + _ resource.ResourceWithConfigure = &serviceIntegrationResource{} + _ resource.ResourceWithImportState = &serviceIntegrationResource{} +) + +func NewServiceIntegrationResource() resource.Resource { + return &serviceIntegrationResource{} +} + +type serviceIntegrationResource struct { + client *aiven.Client +} + +func (s *serviceIntegrationResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + s.client = req.ProviderData.(*aiven.Client) +} + +func (s *serviceIntegrationResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aiven_service_integration" +} + +func (s *serviceIntegrationResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = util.GeneralizeSchema(ctx, schema.Schema{ + Description: "The Service Integration resource allows the creation and management of Aiven Service 
Integrations.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "integration_id": schema.StringAttribute{ + Description: "Service Integration Id at aiven", + Computed: true, + }, + "destination_endpoint_id": schema.StringAttribute{ + Description: "Destination endpoint for the integration (if any)", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Optional: true, + Validators: []validator.String{ + endpointIDValidator, + stringvalidator.ExactlyOneOf( + path.MatchRoot("destination_endpoint_id"), + path.MatchRoot("destination_service_name"), + ), + }, + }, + "destination_service_name": schema.StringAttribute{ + Description: "Destination service for the integration (if any)", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Optional: true, + }, + "integration_type": schema.StringAttribute{ + Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes(), ", ", "`"), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(integrationTypes()...), + }, + }, + "project": schema.StringAttribute{ + Description: "Project the integration belongs to", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Required: true, + }, + "source_endpoint_id": schema.StringAttribute{ + Description: "Source endpoint for the integration (if any)", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Optional: true, + Validators: []validator.String{ + endpointIDValidator, + stringvalidator.ExactlyOneOf( + path.MatchRoot("source_endpoint_id"), + path.MatchRoot("source_service_name"), + ), + }, + }, + "source_service_name": schema.StringAttribute{ + Description: "Source service for the integration (if any)", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "clickhouse_kafka_user_config": clickhousekafka.NewResourceSchema(), + "clickhouse_postgresql_user_config": clickhousepostgresql.NewResourceSchema(), + "datadog_user_config": datadog.NewResourceSchema(), + "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewResourceSchema(), + "kafka_connect_user_config": kafkaconnect.NewResourceSchema(), + "kafka_logs_user_config": kafkalogs.NewResourceSchema(), + "kafka_mirrormaker_user_config": kafkamirrormaker.NewResourceSchema(), + "logs_user_config": logs.NewResourceSchema(), + "metrics_user_config": metrics.NewResourceSchema(), + }, + }) +} + +func (s *serviceIntegrationResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var o resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...) + if resp.Diagnostics.HasError() { + return + } + + // read_replicas can be only be created alongside the service. also the only way to promote the replica + // is to delete the service integration that was created, so we should make it least painful to do so. 
+ // for now, we support to seemlessly import preexisting 'read_replica' service integrations in the resource create + // all other integrations should be imported using `terraform import` + if o.IntegrationType.ValueString() == readReplicaType { + if preexisting, err := getSIByName(ctx, s.client, &o); err != nil { + resp.Diagnostics.AddError("unable to search for possible preexisting 'read_replica' service integration", err.Error()) + return + } else if preexisting != nil { + o.IntegrationID = types.StringValue(preexisting.ServiceIntegrationID) + s.read(ctx, &resp.Diagnostics, &resp.State, &o) + return + } + } + + userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, true) + if err != nil { + resp.Diagnostics.AddError("Failed to expand user config", err.Error()) + return + } + createReq := aiven.CreateServiceIntegrationRequest{ + DestinationProject: getProjectPointer(o.DestinationEndpointID.ValueString()), + DestinationEndpointID: getEndpointIDPointer(o.DestinationEndpointID.ValueString()), + DestinationService: o.DestinationServiceName.ValueStringPointer(), + IntegrationType: o.IntegrationType.ValueString(), + SourceProject: getProjectPointer(o.SourceEndpointID.ValueString()), + SourceEndpointID: getEndpointIDPointer(o.SourceEndpointID.ValueString()), + SourceService: o.SourceServiceName.ValueStringPointer(), + UserConfig: userConfig, + } + + dto, err := s.client.ServiceIntegrations.Create(ctx, o.Project.ValueString(), createReq) + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorCreatingResource, err.Error()) + return + } + + o.IntegrationID = types.StringValue(dto.ServiceIntegrationID) + s.read(ctx, &resp.Diagnostics, &resp.State, &o) +} + +func (s *serviceIntegrationResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var o resourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &o)...) + if resp.Diagnostics.HasError() { + return + } + s.read(ctx, &resp.Diagnostics, &resp.State, &o) +} + +func (s *serviceIntegrationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var state resourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + var o resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...) + if resp.Diagnostics.HasError() { + return + } + + // Copies ID from the state + o.IntegrationID = state.IntegrationID + userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, false) + if err != nil { + resp.Diagnostics.AddError("Failed to expand user config", err.Error()) + return + } + + _, err = s.client.ServiceIntegrations.Update( + ctx, + state.Project.ValueString(), + state.IntegrationID.ValueString(), + aiven.UpdateServiceIntegrationRequest{ + UserConfig: userConfig, + }, + ) + + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorUpdatingResource, err.Error()) + return + } + + s.read(ctx, &resp.Diagnostics, &resp.State, &o) +} + +func (s *serviceIntegrationResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var o resourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &o)...) 
+ if resp.Diagnostics.HasError() { + return + } + + err := s.client.ServiceIntegrations.Delete(ctx, o.Project.ValueString(), o.IntegrationID.ValueString()) + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorDeletingResource, err.Error()) + } +} + +func (s *serviceIntegrationResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// read reads from API and saves to state +func (s *serviceIntegrationResource) read(ctx context.Context, diags *diag.Diagnostics, state *tfsdk.State, o *resourceModel) { + dto, err := getSIByID(ctx, s.client, o) + if err != nil { + diags.AddError(errmsg.SummaryErrorReadingResource, err.Error()) + return + } + + loadFromDTO(ctx, diags, o, dto) + if diags.HasError() { + return + } + diags.Append(state.Set(ctx, o)...) +} + +// getSIByID gets ServiceIntegration by ID +func getSIByID(ctx context.Context, client *aiven.Client, o *resourceModel) (dto *aiven.ServiceIntegration, err error) { + id := o.getID() + project := o.getProject() + if len(id)*len(project) == 0 { + return nil, fmt.Errorf("no ID or project provided") + } + + return dto, util.WaitActive(ctx, func() error { + dto, err = client.ServiceIntegrations.Get(ctx, project, id) + if err != nil { + return err + } + if !dto.Active { + return fmt.Errorf("service integration is not active") + } + return nil + }) +} + +// getSIByName gets ServiceIntegration by name +func getSIByName(ctx context.Context, client *aiven.Client, o *resourceModel) (*aiven.ServiceIntegration, error) { + project := o.Project.ValueString() + integrationType := o.IntegrationType.ValueString() + sourceServiceName := o.SourceServiceName.ValueString() + destinationServiceName := o.DestinationServiceName.ValueString() + + integrations, err := client.ServiceIntegrations.List(ctx, project, sourceServiceName) + if err != nil && !aiven.IsNotFound(err) { + return nil, fmt.Errorf("unable to get list of service integrations: %s", err) + } + + for _, i := range integrations { + if i.SourceService == nil || i.DestinationService == nil || i.ServiceIntegrationID == "" { + continue + } + + if i.IntegrationType == integrationType && + *i.SourceService == sourceServiceName && + *i.DestinationService == destinationServiceName { + return i, nil + } + } + + return nil, nil +} + +// loadFromDTO loads API values to terraform object +func loadFromDTO(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) { + flattenUserConfig(ctx, diags, o, dto) + if diags.HasError() { + return + } + + id := o.getID() + project := o.getProject() + o.ID = newEndpointID(project, &id) + o.DestinationEndpointID = newEndpointID(project, dto.DestinationEndpointID) + o.DestinationServiceName = types.StringPointerValue(dto.DestinationService) + o.IntegrationType = types.StringValue(dto.IntegrationType) + o.SourceEndpointID = newEndpointID(project, dto.SourceEndpointID) + o.SourceServiceName = types.StringPointerValue(dto.SourceService) +} diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go new file mode 100644 index 000000000..901eaf406 --- /dev/null +++ b/internal/plugin/service/serviceintegration/userconfig.go @@ -0,0 +1,136 @@ +package serviceintegration + +import ( + "context" + + "github.com/aiven/aiven-go-client/v2" + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const ( + clickhouseKafkaType = "clickhouse_kafka" + clickhousePostgresqlType = "clickhouse_postgresql" + datadogType = "datadog" + externalAwsCloudwatchMetricsType = "external_aws_cloudwatch_metrics" + kafkaConnectType = "kafka_connect" + kafkaLogsType = "kafka_logs" + kafkaMirrormakerType = "kafka_mirrormaker" + logsType = "logs" + metricsType = "metrics" + readReplicaType = "read_replica" +) + +func integrationTypes() []string { + return []string{ + "alertmanager", + "cassandra_cross_service_cluster", + clickhouseKafkaType, + clickhousePostgresqlType, + "dashboard", + datadogType, + "datasource", + "external_aws_cloudwatch_logs", + externalAwsCloudwatchMetricsType, + "external_elasticsearch_logs", + "external_google_cloud_logging", + "external_opensearch_logs", + "flink", + "internal_connectivity", + "jolokia", + kafkaConnectType, + kafkaLogsType, + kafkaMirrormakerType, + logsType, + "m3aggregator", + "m3coordinator", + metricsType, + "opensearch_cross_cluster_replication", + "opensearch_cross_cluster_search", + "prometheus", + readReplicaType, + "rsyslog", + "schema_registry_proxy", + } +} + +// flattenUserConfig from aiven to terraform +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) { + if dto.UserConfig == nil { + return + } + + // We set user config from Aiven only if it's been set in TF + // Otherwise it will produce invalid "after apply" + switch { + case isSet(o.ClickhouseKafkaUserConfig): + o.ClickhouseKafkaUserConfig = clickhousekafka.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.ClickhousePostgresqlUserConfig): + o.ClickhousePostgresqlUserConfig = clickhousepostgresql.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.DatadogUserConfig): + o.DatadogUserConfig = datadog.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.ExternalAwsCloudwatchMetricsUserConfig): + o.ExternalAwsCloudwatchMetricsUserConfig = externalawscloudwatchmetrics.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaConnectUserConfig): + o.KafkaConnectUserConfig = kafkaconnect.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaLogsUserConfig): + o.KafkaLogsUserConfig = kafkalogs.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaMirrormakerUserConfig): + o.KafkaMirrormakerUserConfig = kafkamirrormaker.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.LogsUserConfig): + o.LogsUserConfig = logs.Flatten(ctx, diags, dto.UserConfig) + case 
isSet(o.MetricsUserConfig): + o.MetricsUserConfig = metrics.Flatten(ctx, diags, dto.UserConfig) + } +} + +// expandUserConfig from terraform to aiven +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, create bool) (map[string]any, error) { + var marshal func(any) (map[string]any, error) + if create { + marshal = schemautil.MarshalCreateUserConfig + } else { + marshal = schemautil.MarshalUpdateUserConfig + } + + // If invalid integration type is set + // This will send wrong config to Aiven + // Which is sort of a validation too + switch { + case isSet(o.ClickhouseKafkaUserConfig): + return marshal(clickhousekafka.Expand(ctx, diags, o.ClickhouseKafkaUserConfig)) + case isSet(o.ClickhousePostgresqlUserConfig): + return marshal(clickhousepostgresql.Expand(ctx, diags, o.ClickhousePostgresqlUserConfig)) + case isSet(o.DatadogUserConfig): + return marshal(datadog.Expand(ctx, diags, o.DatadogUserConfig)) + case isSet(o.ExternalAwsCloudwatchMetricsUserConfig): + return marshal(externalawscloudwatchmetrics.Expand(ctx, diags, o.ExternalAwsCloudwatchMetricsUserConfig)) + case isSet(o.KafkaConnectUserConfig): + return marshal(kafkaconnect.Expand(ctx, diags, o.KafkaConnectUserConfig)) + case isSet(o.KafkaLogsUserConfig): + return marshal(kafkalogs.Expand(ctx, diags, o.KafkaLogsUserConfig)) + case isSet(o.KafkaMirrormakerUserConfig): + return marshal(kafkamirrormaker.Expand(ctx, diags, o.KafkaMirrormakerUserConfig)) + case isSet(o.LogsUserConfig): + return marshal(logs.Expand(ctx, diags, o.LogsUserConfig)) + case isSet(o.MetricsUserConfig): + return marshal(metrics.Expand(ctx, diags, o.MetricsUserConfig)) + default: + return nil, nil + } +} + +func isSet(o types.List) bool { + return !(o.IsUnknown() || o.IsNull()) +} diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go new file mode 100644 index 000000000..02b903200 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go @@ -0,0 +1,408 @@ +// Code generated by user config generator. DO NOT EDIT. 
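+
+// Example Terraform configuration using this user config (an illustrative
+// sketch; service names and values are hypothetical):
+//
+//	resource "aiven_service_integration" "example" {
+//	  project                  = "my-project"
+//	  integration_type         = "clickhouse_kafka"
+//	  source_service_name      = "my-kafka"
+//	  destination_service_name = "my-clickhouse"
+//
+//	  clickhouse_kafka_user_config {
+//	    tables {
+//	      name        = "events"
+//	      group_name  = "clickhouse"
+//	      data_format = "JSONEachRow"
+//
+//	      columns {
+//	        name = "id"
+//	        type = "UInt64"
+//	      }
+//
+//	      topics {
+//	        name = "events"
+//	      }
+//	    }
+//	  }
+//	}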
+
+package clickhousekafka
+
+import (
+	"context"
+
+	listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+	attr "github.com/hashicorp/terraform-plugin-framework/attr"
+	datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	diag "github.com/hashicorp/terraform-plugin-framework/diag"
+	resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
+	stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+	validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	types "github.com/hashicorp/terraform-plugin-framework/types"
+
+	schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+// NewResourceSchema returns resource schema
+func NewResourceSchema() resource.ListNestedBlock {
+	return resource.ListNestedBlock{
+		Description: "Integration user config",
+		NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.ListNestedBlock{
+			Description: "Tables to create",
+			NestedObject: resource.NestedBlockObject{
+				Attributes: map[string]resource.Attribute{
+					"auto_offset_reset": resource.StringAttribute{
+						Computed:    true,
+						Default:     stringdefault.StaticString("earliest"),
+						Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.",
+						Optional:    true,
+					},
+					"data_format": resource.StringAttribute{
+						Description: "Message data format. The default value is `JSONEachRow`.",
+						Required:    true,
+					},
+					"date_time_input_format": resource.StringAttribute{
+						Computed:    true,
+						Default:     stringdefault.StaticString("basic"),
+						Description: "Method to read DateTime from text input formats. The default value is `basic`.",
+						Optional:    true,
+					},
+					"group_name": resource.StringAttribute{
+						Description: "Kafka consumers group. The default value is `clickhouse`.",
+						Required:    true,
+					},
+					"handle_error_mode": resource.StringAttribute{
+						Computed:    true,
+						Default:     stringdefault.StaticString("default"),
+						Description: "How to handle errors for Kafka engine. The default value is `default`.",
+						Optional:    true,
+					},
+					"max_block_size": resource.Int64Attribute{
+						Computed:    true,
+						Default:     int64default.StaticInt64(0),
+						Description: "Number of rows collected by poll(s) for flushing data from Kafka. The default value is `0`.",
+						Optional:    true,
+					},
+					"max_rows_per_message": resource.Int64Attribute{
+						Computed:    true,
+						Default:     int64default.StaticInt64(1),
+						Description: "The maximum number of rows produced in one Kafka message for row-based formats. The default value is `1`.",
+						Optional:    true,
+					},
+					"name": resource.StringAttribute{
+						Description: "Name of the table.",
+						Required:    true,
+					},
+					"num_consumers": resource.Int64Attribute{
+						Computed:    true,
+						Default:     int64default.StaticInt64(1),
+						Description: "The number of consumers per table per replica. The default value is `1`.",
+						Optional:    true,
+					},
+					"poll_max_batch_size": resource.Int64Attribute{
+						Computed:    true,
+						Default:     int64default.StaticInt64(0),
+						Description: "Maximum number of messages to be polled in a single Kafka poll. The default value is `0`.",
+						Optional:    true,
+					},
+					"skip_broken_messages": resource.Int64Attribute{
+						Computed:    true,
+						Default:     int64default.StaticInt64(0),
+						Description: "Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.",
+						Optional:    true,
+					},
+				},
+				Blocks: map[string]resource.Block{
+					"columns": resource.ListNestedBlock{
+						Description: "Table columns",
+						NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
+							"name": resource.StringAttribute{
+								Description: "Column name.",
+								Required:    true,
+							},
+							"type": resource.StringAttribute{
+								Description: "Column type.",
+								Required:    true,
+							},
+						}},
+						Validators: []validator.List{listvalidator.SizeAtMost(100)},
+					},
+					"topics": resource.ListNestedBlock{
+						Description: "Kafka topics",
+						NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"name": resource.StringAttribute{
+							Description: "Name of the topic.",
+							Required:    true,
+						}}},
+						Validators: []validator.List{listvalidator.SizeAtMost(100)},
+					},
+				},
+			},
+			Validators: []validator.List{listvalidator.SizeAtMost(100)},
+		}}},
+		Validators: []validator.List{listvalidator.SizeAtMost(1)},
+	}
+}
+
+// NewDataSourceSchema returns datasource schema
+func NewDataSourceSchema() datasource.ListNestedBlock {
+	return datasource.ListNestedBlock{
+		Description: "Integration user config",
+		NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.ListNestedBlock{
+			Description: "Tables to create",
+			NestedObject: datasource.NestedBlockObject{
+				Attributes: map[string]datasource.Attribute{
+					"auto_offset_reset": datasource.StringAttribute{
+						Computed:    true,
+						Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.",
+					},
+					"data_format": datasource.StringAttribute{
+						Computed:    true,
+						Description: "Message data format. The default value is `JSONEachRow`.",
+					},
+					"date_time_input_format": datasource.StringAttribute{
+						Computed:    true,
+						Description: "Method to read DateTime from text input formats. The default value is `basic`.",
+					},
+					"group_name": datasource.StringAttribute{
+						Computed:    true,
+						Description: "Kafka consumers group. The default value is `clickhouse`.",
+					},
+					"handle_error_mode": datasource.StringAttribute{
+						Computed:    true,
+						Description: "How to handle errors for Kafka engine. The default value is `default`.",
+					},
+					"max_block_size": datasource.Int64Attribute{
+						Computed:    true,
+						Description: "Number of rows collected by poll(s) for flushing data from Kafka. The default value is `0`.",
+					},
+					"max_rows_per_message": datasource.Int64Attribute{
+						Computed:    true,
+						Description: "The maximum number of rows produced in one Kafka message for row-based formats. The default value is `1`.",
+					},
+					"name": datasource.StringAttribute{
+						Computed:    true,
+						Description: "Name of the table.",
+					},
+					"num_consumers": datasource.Int64Attribute{
+						Computed:    true,
+						Description: "The number of consumers per table per replica. The default value is `1`.",
+					},
+					"poll_max_batch_size": datasource.Int64Attribute{
+						Computed:    true,
+						Description: "Maximum number of messages to be polled in a single Kafka poll. The default value is `0`.",
+					},
+					"skip_broken_messages": datasource.Int64Attribute{
+						Computed:    true,
+						Description: "Skip at least this number of broken messages from Kafka topic per block. 
The default value is `0`.", + }, + }, + Blocks: map[string]datasource.Block{ + "columns": datasource.ListNestedBlock{ + Description: "Table columns", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "name": datasource.StringAttribute{ + Computed: true, + Description: "Column name.", + }, + "type": datasource.StringAttribute{ + Computed: true, + Description: "Column type.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + "topics": datasource.ListNestedBlock{ + Description: "Kafka topics", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"name": datasource.StringAttribute{ + Computed: true, + Description: "Name of the topic.", + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Tables types.List `tfsdk:"tables"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Tables []*dtoTables `groups:"create,update" json:"tables,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + tablesVar := schemautil.ExpandListNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) + if diags.HasError() { + return nil + } + return &dtoUserConfig{Tables: tablesVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + tablesVar := schemautil.FlattenListNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) + if diags.HasError() { + return nil + } + return &tfoUserConfig{Tables: tablesVar} +} + +var userConfigAttrs = map[string]attr.Type{"tables": types.ListType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} + +// tfoTables Table to create +type tfoTables struct { + AutoOffsetReset types.String `tfsdk:"auto_offset_reset"` + Columns types.List `tfsdk:"columns"` + DataFormat types.String `tfsdk:"data_format"` + DateTimeInputFormat types.String `tfsdk:"date_time_input_format"` + GroupName types.String `tfsdk:"group_name"` + HandleErrorMode types.String `tfsdk:"handle_error_mode"` + MaxBlockSize types.Int64 `tfsdk:"max_block_size"` + MaxRowsPerMessage types.Int64 `tfsdk:"max_rows_per_message"` + Name types.String `tfsdk:"name"` + NumConsumers types.Int64 `tfsdk:"num_consumers"` + PollMaxBatchSize types.Int64 `tfsdk:"poll_max_batch_size"` + SkipBrokenMessages types.Int64 `tfsdk:"skip_broken_messages"` + Topics types.List `tfsdk:"topics"` +} + +// dtoTables request/response object +type dtoTables struct { + AutoOffsetReset *string `groups:"create,update" json:"auto_offset_reset,omitempty"` + Columns []*dtoColumns `groups:"create,update" json:"columns"` + DataFormat string `groups:"create,update" json:"data_format"` + DateTimeInputFormat *string `groups:"create,update" json:"date_time_input_format,omitempty"` + GroupName string `groups:"create,update" json:"group_name"` + HandleErrorMode *string `groups:"create,update" json:"handle_error_mode,omitempty"` + MaxBlockSize *int64 `groups:"create,update" json:"max_block_size,omitempty"` + MaxRowsPerMessage *int64 `groups:"create,update" json:"max_rows_per_message,omitempty"` + Name string `groups:"create,update" 
json:"name"` + NumConsumers *int64 `groups:"create,update" json:"num_consumers,omitempty"` + PollMaxBatchSize *int64 `groups:"create,update" json:"poll_max_batch_size,omitempty"` + SkipBrokenMessages *int64 `groups:"create,update" json:"skip_broken_messages,omitempty"` + Topics []*dtoTopics `groups:"create,update" json:"topics"` +} + +// expandTables expands tf object into dto object +func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *dtoTables { + columnsVar := schemautil.ExpandListNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) + if diags.HasError() { + return nil + } + topicsVar := schemautil.ExpandListNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) + if diags.HasError() { + return nil + } + return &dtoTables{ + AutoOffsetReset: schemautil.ValueStringPointer(o.AutoOffsetReset), + Columns: columnsVar, + DataFormat: o.DataFormat.ValueString(), + DateTimeInputFormat: schemautil.ValueStringPointer(o.DateTimeInputFormat), + GroupName: o.GroupName.ValueString(), + HandleErrorMode: schemautil.ValueStringPointer(o.HandleErrorMode), + MaxBlockSize: schemautil.ValueInt64Pointer(o.MaxBlockSize), + MaxRowsPerMessage: schemautil.ValueInt64Pointer(o.MaxRowsPerMessage), + Name: o.Name.ValueString(), + NumConsumers: schemautil.ValueInt64Pointer(o.NumConsumers), + PollMaxBatchSize: schemautil.ValueInt64Pointer(o.PollMaxBatchSize), + SkipBrokenMessages: schemautil.ValueInt64Pointer(o.SkipBrokenMessages), + Topics: topicsVar, + } +} + +// flattenTables flattens dto object into tf object +func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) *tfoTables { + columnsVar := schemautil.FlattenListNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) + if diags.HasError() { + return nil + } + topicsVar := schemautil.FlattenListNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) + if diags.HasError() { + return nil + } + return &tfoTables{ + AutoOffsetReset: types.StringPointerValue(o.AutoOffsetReset), + Columns: columnsVar, + DataFormat: types.StringValue(o.DataFormat), + DateTimeInputFormat: types.StringPointerValue(o.DateTimeInputFormat), + GroupName: types.StringValue(o.GroupName), + HandleErrorMode: types.StringPointerValue(o.HandleErrorMode), + MaxBlockSize: types.Int64PointerValue(o.MaxBlockSize), + MaxRowsPerMessage: types.Int64PointerValue(o.MaxRowsPerMessage), + Name: types.StringValue(o.Name), + NumConsumers: types.Int64PointerValue(o.NumConsumers), + PollMaxBatchSize: types.Int64PointerValue(o.PollMaxBatchSize), + SkipBrokenMessages: types.Int64PointerValue(o.SkipBrokenMessages), + Topics: topicsVar, + } +} + +var tablesAttrs = map[string]attr.Type{ + "auto_offset_reset": types.StringType, + "columns": types.ListType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, + "data_format": types.StringType, + "date_time_input_format": types.StringType, + "group_name": types.StringType, + "handle_error_mode": types.StringType, + "max_block_size": types.Int64Type, + "max_rows_per_message": types.Int64Type, + "name": types.StringType, + "num_consumers": types.Int64Type, + "poll_max_batch_size": types.Int64Type, + "skip_broken_messages": types.Int64Type, + "topics": types.ListType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, +} + +// tfoColumns Table column +type tfoColumns struct { + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` +} + +// dtoColumns request/response object +type dtoColumns struct { + Name string 
`groups:"create,update" json:"name"` + Type string `groups:"create,update" json:"type"` +} + +// expandColumns expands tf object into dto object +func expandColumns(ctx context.Context, diags *diag.Diagnostics, o *tfoColumns) *dtoColumns { + return &dtoColumns{ + Name: o.Name.ValueString(), + Type: o.Type.ValueString(), + } +} + +// flattenColumns flattens dto object into tf object +func flattenColumns(ctx context.Context, diags *diag.Diagnostics, o *dtoColumns) *tfoColumns { + return &tfoColumns{ + Name: types.StringValue(o.Name), + Type: types.StringValue(o.Type), + } +} + +var columnsAttrs = map[string]attr.Type{ + "name": types.StringType, + "type": types.StringType, +} + +// tfoTopics Kafka topic +type tfoTopics struct { + Name types.String `tfsdk:"name"` +} + +// dtoTopics request/response object +type dtoTopics struct { + Name string `groups:"create,update" json:"name"` +} + +// expandTopics expands tf object into dto object +func expandTopics(ctx context.Context, diags *diag.Diagnostics, o *tfoTopics) *dtoTopics { + return &dtoTopics{Name: o.Name.ValueString()} +} + +// flattenTopics flattens dto object into tf object +func flattenTopics(ctx context.Context, diags *diag.Diagnostics, o *dtoTopics) *tfoTopics { + return &tfoTopics{Name: types.StringValue(o.Name)} +} + +var topicsAttrs = map[string]attr.Type{"name": types.StringType} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go new file mode 100644 index 000000000..77316cfac --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go @@ -0,0 +1,122 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package clickhousekafka + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "tables": [ + { + "auto_offset_reset": "foo", + "columns": [ + { + "name": "foo", + "type": "foo" + } + ], + "data_format": "foo", + "date_time_input_format": "foo", + "group_name": "foo", + "handle_error_mode": "foo", + "max_block_size": 1, + "max_rows_per_message": 1, + "name": "foo", + "num_consumers": 1, + "poll_max_batch_size": 1, + "skip_broken_messages": 1, + "topics": [ + { + "name": "foo" + } + ] + } + ] +}` +const updateOnlyFields = `{ + "tables": [ + { + "auto_offset_reset": "foo", + "columns": [ + { + "name": "foo", + "type": "foo" + } + ], + "data_format": "foo", + "date_time_input_format": "foo", + "group_name": "foo", + "handle_error_mode": "foo", + "max_block_size": 1, + "max_rows_per_message": 1, + "name": "foo", + "num_consumers": 1, + "poll_max_batch_size": 1, + "skip_broken_messages": 1, + "topics": [ + { + "name": "foo" + } + ] + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go new file mode 100644 index 000000000..a0cbd20b8 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go @@ -0,0 +1,145 @@ +// Code generated by user config generator. DO NOT EDIT. 
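+
+// Example Terraform configuration using this user config (an illustrative
+// sketch; service names and values are hypothetical):
+//
+//	resource "aiven_service_integration" "example" {
+//	  project                  = "my-project"
+//	  integration_type         = "clickhouse_postgresql"
+//	  source_service_name      = "my-postgresql"
+//	  destination_service_name = "my-clickhouse"
+//
+//	  clickhouse_postgresql_user_config {
+//	    databases {
+//	      database = "defaultdb"
+//	      schema   = "public"
+//	    }
+//	  }
+//	}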
+ +package clickhousepostgresql + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.ListNestedBlock{ + Description: "Databases to expose", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "database": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("defaultdb"), + Description: "PostgreSQL database to expose. The default value is `defaultdb`.", + Optional: true, + }, + "schema": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("public"), + Description: "PostgreSQL schema to expose. The default value is `public`.", + Optional: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(10)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.ListNestedBlock{ + Description: "Databases to expose", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "database": datasource.StringAttribute{ + Computed: true, + Description: "PostgreSQL database to expose. The default value is `defaultdb`.", + }, + "schema": datasource.StringAttribute{ + Computed: true, + Description: "PostgreSQL schema to expose. 
The default value is `public`.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(10)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Databases types.List `tfsdk:"databases"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Databases []*dtoDatabases `groups:"create,update" json:"databases,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + databasesVar := schemautil.ExpandListNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) + if diags.HasError() { + return nil + } + return &dtoUserConfig{Databases: databasesVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + databasesVar := schemautil.FlattenListNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) + if diags.HasError() { + return nil + } + return &tfoUserConfig{Databases: databasesVar} +} + +var userConfigAttrs = map[string]attr.Type{"databases": types.ListType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} + +// tfoDatabases Database to expose +type tfoDatabases struct { + Database types.String `tfsdk:"database"` + Schema types.String `tfsdk:"schema"` +} + +// dtoDatabases request/response object +type dtoDatabases struct { + Database *string `groups:"create,update" json:"database,omitempty"` + Schema *string `groups:"create,update" json:"schema,omitempty"` +} + +// expandDatabases expands tf object into dto object +func expandDatabases(ctx context.Context, diags *diag.Diagnostics, o *tfoDatabases) *dtoDatabases { + return &dtoDatabases{ + Database: schemautil.ValueStringPointer(o.Database), + Schema: schemautil.ValueStringPointer(o.Schema), + } +} + +// flattenDatabases flattens dto object into tf object +func flattenDatabases(ctx context.Context, diags *diag.Diagnostics, o *dtoDatabases) *tfoDatabases { + return &tfoDatabases{ + Database: types.StringPointerValue(o.Database), + Schema: types.StringPointerValue(o.Schema), + } +} + +var databasesAttrs = map[string]attr.Type{ + "database": types.StringType, + "schema": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go new file mode 100644 index 000000000..217dea78b --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go @@ 
-0,0 +1,82 @@ +// Code generated by user config generator. DO NOT EDIT. + +package clickhousepostgresql + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "databases": [ + { + "database": "foo", + "schema": "foo" + } + ] +}` +const updateOnlyFields = `{ + "databases": [ + { + "database": "foo", + "schema": "foo" + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog.go b/internal/plugin/service/userconfig/integration/datadog/datadog.go new file mode 100644 index 000000000..8a144b214 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/datadog/datadog.go @@ -0,0 +1,460 @@ +// Code generated by user config generator. DO NOT EDIT. 
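+
+// Example Terraform configuration using this user config (an illustrative
+// sketch; names and values are hypothetical, and the endpoint ID follows the
+// `project_name/endpoint_id` format expected by the resource validator):
+//
+//	resource "aiven_service_integration" "example" {
+//	  project                 = "my-project"
+//	  integration_type        = "datadog"
+//	  source_service_name     = "my-kafka"
+//	  destination_endpoint_id = "my-project/my-datadog-endpoint-id"
+//
+//	  datadog_user_config {
+//	    datadog_dbm_enabled = true
+//
+//	    datadog_tags {
+//	      tag     = "env:staging"
+//	      comment = "hypothetical tag"
+//	    }
+//	  }
+//	}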
+
+package datadog
+
+import (
+	"context"
+
+	listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+	attr "github.com/hashicorp/terraform-plugin-framework/attr"
+	datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	diag "github.com/hashicorp/terraform-plugin-framework/diag"
+	resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	booldefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+	validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	types "github.com/hashicorp/terraform-plugin-framework/types"
+
+	schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+// NewResourceSchema returns resource schema
+func NewResourceSchema() resource.ListNestedBlock {
+	return resource.ListNestedBlock{
+		NestedObject: resource.NestedBlockObject{
+			Attributes: map[string]resource.Attribute{
+				"datadog_dbm_enabled": resource.BoolAttribute{
+					Computed:    true,
+					Description: "Enable Datadog Database Monitoring.",
+					Optional:    true,
+				},
+				"exclude_consumer_groups": resource.ListAttribute{
+					Computed:    true,
+					Description: "List of consumer groups to exclude.",
+					ElementType: types.StringType,
+					Optional:    true,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"exclude_topics": resource.ListAttribute{
+					Computed:    true,
+					Description: "List of topics to exclude.",
+					ElementType: types.StringType,
+					Optional:    true,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"include_consumer_groups": resource.ListAttribute{
+					Computed:    true,
+					Description: "List of consumer groups to include.",
+					ElementType: types.StringType,
+					Optional:    true,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"include_topics": resource.ListAttribute{
+					Computed:    true,
+					Description: "List of topics to include.",
+					ElementType: types.StringType,
+					Optional:    true,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"kafka_custom_metrics": resource.ListAttribute{
+					Computed:    true,
+					Description: "List of custom metrics.",
+					ElementType: types.StringType,
+					Optional:    true,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"max_jmx_metrics": resource.Int64Attribute{
+					Computed:    true,
+					Description: "Maximum number of JMX metrics to send.",
+					Optional:    true,
+				},
+			},
+			Blocks: map[string]resource.Block{
+				"datadog_tags": resource.ListNestedBlock{
+					Description: "Custom tags provided by user",
+					NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
+						"comment": resource.StringAttribute{
+							Computed:    true,
+							Description: "Optional tag explanation.",
+							Optional:    true,
+						},
+						"tag": resource.StringAttribute{
+							Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.",
+							Required:    true,
+						},
+					}},
+					Validators: []validator.List{listvalidator.SizeAtMost(32)},
+				},
+				"opensearch": resource.ListNestedBlock{
+					Description: "Datadog Opensearch Options",
+					NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
+						"index_stats_enabled": resource.BoolAttribute{
+							Computed:    true,
+							Description: "Enable Datadog Opensearch Index Monitoring.",
+							Optional:    true,
+						},
+						"pending_task_stats_enabled": resource.BoolAttribute{
+							Computed:    true,
+							Description: "Enable Datadog Opensearch Pending Task Monitoring.",
+							Optional:    true,
+						},
+						"pshard_stats_enabled": resource.BoolAttribute{
+							Computed:    true,
+							Description: "Enable Datadog Opensearch Primary Shard Monitoring.",
+							Optional:    true,
+						},
+					}},
+				},
+				"redis": resource.ListNestedBlock{
+					Description: "Datadog Redis Options",
+					NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"command_stats_enabled": resource.BoolAttribute{
+						Computed:    true,
+						Default:     booldefault.StaticBool(false),
+						Description: "Enable command_stats option in the agent's configuration. The default value is `false`.",
+						Optional:    true,
+					}}},
+				},
+			},
+		},
+		Validators: []validator.List{listvalidator.SizeAtMost(1)},
+	}
+}
+
+// NewDataSourceSchema returns datasource schema
+func NewDataSourceSchema() datasource.ListNestedBlock {
+	return datasource.ListNestedBlock{
+		NestedObject: datasource.NestedBlockObject{
+			Attributes: map[string]datasource.Attribute{
+				"datadog_dbm_enabled": datasource.BoolAttribute{
+					Computed:    true,
+					Description: "Enable Datadog Database Monitoring.",
+				},
+				"exclude_consumer_groups": datasource.ListAttribute{
+					Computed:    true,
+					Description: "List of consumer groups to exclude.",
+					ElementType: types.StringType,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"exclude_topics": datasource.ListAttribute{
+					Computed:    true,
+					Description: "List of topics to exclude.",
+					ElementType: types.StringType,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"include_consumer_groups": datasource.ListAttribute{
+					Computed:    true,
+					Description: "List of consumer groups to include.",
+					ElementType: types.StringType,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"include_topics": datasource.ListAttribute{
+					Computed:    true,
+					Description: "List of topics to include.",
+					ElementType: types.StringType,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"kafka_custom_metrics": datasource.ListAttribute{
+					Computed:    true,
+					Description: "List of custom metrics.",
+					ElementType: types.StringType,
+					Validators:  []validator.List{listvalidator.SizeAtMost(1024)},
+				},
+				"max_jmx_metrics": datasource.Int64Attribute{
+					Computed:    true,
+					Description: "Maximum number of JMX metrics to send.",
+				},
+			},
+			Blocks: map[string]datasource.Block{
+				"datadog_tags": datasource.ListNestedBlock{
+					Description: "Custom tags provided by user",
+					NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
+						"comment": datasource.StringAttribute{
+							Computed:    true,
+							Description: "Optional tag explanation.",
+						},
+						"tag": datasource.StringAttribute{
+							Computed:    true,
+							Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(32)}, + }, + "opensearch": datasource.ListNestedBlock{ + Description: "Datadog Opensearch Options", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "index_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Index Monitoring.", + }, + "pending_task_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Pending Task Monitoring.", + }, + "pshard_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Primary Shard Monitoring.", + }, + }}, + }, + "redis": datasource.ListNestedBlock{ + Description: "Datadog Redis Options", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"command_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable command_stats option in the agent's configuration. The default value is `false`.", + }}}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + DatadogDbmEnabled types.Bool `tfsdk:"datadog_dbm_enabled"` + DatadogTags types.List `tfsdk:"datadog_tags"` + ExcludeConsumerGroups types.List `tfsdk:"exclude_consumer_groups"` + ExcludeTopics types.List `tfsdk:"exclude_topics"` + IncludeConsumerGroups types.List `tfsdk:"include_consumer_groups"` + IncludeTopics types.List `tfsdk:"include_topics"` + KafkaCustomMetrics types.List `tfsdk:"kafka_custom_metrics"` + MaxJmxMetrics types.Int64 `tfsdk:"max_jmx_metrics"` + Opensearch types.List `tfsdk:"opensearch"` + Redis types.List `tfsdk:"redis"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + DatadogDbmEnabled *bool `groups:"create,update" json:"datadog_dbm_enabled,omitempty"` + DatadogTags []*dtoDatadogTags `groups:"create,update" json:"datadog_tags,omitempty"` + ExcludeConsumerGroups []string `groups:"create,update" json:"exclude_consumer_groups,omitempty"` + ExcludeTopics []string `groups:"create,update" json:"exclude_topics,omitempty"` + IncludeConsumerGroups []string `groups:"create,update" json:"include_consumer_groups,omitempty"` + IncludeTopics []string `groups:"create,update" json:"include_topics,omitempty"` + KafkaCustomMetrics []string `groups:"create,update" json:"kafka_custom_metrics,omitempty"` + MaxJmxMetrics *int64 `groups:"create,update" json:"max_jmx_metrics,omitempty"` + Opensearch *dtoOpensearch `groups:"create,update" json:"opensearch,omitempty"` + Redis *dtoRedis `groups:"create,update" json:"redis,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + datadogTagsVar := schemautil.ExpandListNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) + if diags.HasError() { + return nil + } + excludeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeConsumerGroups) + if diags.HasError() { + return nil + } + excludeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeTopics) + if diags.HasError() { + return nil + } + includeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeConsumerGroups) + if diags.HasError() { + return nil + } + includeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeTopics) + if diags.HasError() { + 
return nil + } + kafkaCustomMetricsVar := schemautil.ExpandList[string](ctx, diags, o.KafkaCustomMetrics) + if diags.HasError() { + return nil + } + opensearchVar := schemautil.ExpandListBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) + if diags.HasError() { + return nil + } + redisVar := schemautil.ExpandListBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + DatadogDbmEnabled: schemautil.ValueBoolPointer(o.DatadogDbmEnabled), + DatadogTags: datadogTagsVar, + ExcludeConsumerGroups: excludeConsumerGroupsVar, + ExcludeTopics: excludeTopicsVar, + IncludeConsumerGroups: includeConsumerGroupsVar, + IncludeTopics: includeTopicsVar, + KafkaCustomMetrics: kafkaCustomMetricsVar, + MaxJmxMetrics: schemautil.ValueInt64Pointer(o.MaxJmxMetrics), + Opensearch: opensearchVar, + Redis: redisVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + datadogTagsVar := schemautil.FlattenListNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) + if diags.HasError() { + return nil + } + excludeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) + diags.Append(d...) + if diags.HasError() { + return nil + } + excludeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeTopics) + diags.Append(d...) + if diags.HasError() { + return nil + } + includeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) + diags.Append(d...) + if diags.HasError() { + return nil + } + includeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeTopics) + diags.Append(d...) + if diags.HasError() { + return nil + } + kafkaCustomMetricsVar, d := types.ListValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) + diags.Append(d...) 
+ if diags.HasError() { + return nil + } + opensearchVar := schemautil.FlattenListBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) + if diags.HasError() { + return nil + } + redisVar := schemautil.FlattenListBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + DatadogDbmEnabled: types.BoolPointerValue(o.DatadogDbmEnabled), + DatadogTags: datadogTagsVar, + ExcludeConsumerGroups: excludeConsumerGroupsVar, + ExcludeTopics: excludeTopicsVar, + IncludeConsumerGroups: includeConsumerGroupsVar, + IncludeTopics: includeTopicsVar, + KafkaCustomMetrics: kafkaCustomMetricsVar, + MaxJmxMetrics: types.Int64PointerValue(o.MaxJmxMetrics), + Opensearch: opensearchVar, + Redis: redisVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "datadog_dbm_enabled": types.BoolType, + "datadog_tags": types.ListType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, + "exclude_consumer_groups": types.ListType{ElemType: types.StringType}, + "exclude_topics": types.ListType{ElemType: types.StringType}, + "include_consumer_groups": types.ListType{ElemType: types.StringType}, + "include_topics": types.ListType{ElemType: types.StringType}, + "kafka_custom_metrics": types.ListType{ElemType: types.StringType}, + "max_jmx_metrics": types.Int64Type, + "opensearch": types.ListType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, + "redis": types.ListType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, +} + +// tfoDatadogTags Datadog tag defined by user +type tfoDatadogTags struct { + Comment types.String `tfsdk:"comment"` + Tag types.String `tfsdk:"tag"` +} + +// dtoDatadogTags request/response object +type dtoDatadogTags struct { + Comment *string `groups:"create,update" json:"comment,omitempty"` + Tag string `groups:"create,update" json:"tag"` +} + +// expandDatadogTags expands tf object into dto object +func expandDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *tfoDatadogTags) *dtoDatadogTags { + return &dtoDatadogTags{ + Comment: schemautil.ValueStringPointer(o.Comment), + Tag: o.Tag.ValueString(), + } +} + +// flattenDatadogTags flattens dto object into tf object +func flattenDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *dtoDatadogTags) *tfoDatadogTags { + return &tfoDatadogTags{ + Comment: types.StringPointerValue(o.Comment), + Tag: types.StringValue(o.Tag), + } +} + +var datadogTagsAttrs = map[string]attr.Type{ + "comment": types.StringType, + "tag": types.StringType, +} + +// tfoOpensearch Datadog Opensearch Options +type tfoOpensearch struct { + IndexStatsEnabled types.Bool `tfsdk:"index_stats_enabled"` + PendingTaskStatsEnabled types.Bool `tfsdk:"pending_task_stats_enabled"` + PshardStatsEnabled types.Bool `tfsdk:"pshard_stats_enabled"` +} + +// dtoOpensearch request/response object +type dtoOpensearch struct { + IndexStatsEnabled *bool `groups:"create,update" json:"index_stats_enabled,omitempty"` + PendingTaskStatsEnabled *bool `groups:"create,update" json:"pending_task_stats_enabled,omitempty"` + PshardStatsEnabled *bool `groups:"create,update" json:"pshard_stats_enabled,omitempty"` +} + +// expandOpensearch expands tf object into dto object +func expandOpensearch(ctx context.Context, diags *diag.Diagnostics, o *tfoOpensearch) *dtoOpensearch { + return &dtoOpensearch{ + IndexStatsEnabled: schemautil.ValueBoolPointer(o.IndexStatsEnabled), + PendingTaskStatsEnabled: 
schemautil.ValueBoolPointer(o.PendingTaskStatsEnabled), + PshardStatsEnabled: schemautil.ValueBoolPointer(o.PshardStatsEnabled), + } +} + +// flattenOpensearch flattens dto object into tf object +func flattenOpensearch(ctx context.Context, diags *diag.Diagnostics, o *dtoOpensearch) *tfoOpensearch { + return &tfoOpensearch{ + IndexStatsEnabled: types.BoolPointerValue(o.IndexStatsEnabled), + PendingTaskStatsEnabled: types.BoolPointerValue(o.PendingTaskStatsEnabled), + PshardStatsEnabled: types.BoolPointerValue(o.PshardStatsEnabled), + } +} + +var opensearchAttrs = map[string]attr.Type{ + "index_stats_enabled": types.BoolType, + "pending_task_stats_enabled": types.BoolType, + "pshard_stats_enabled": types.BoolType, +} + +// tfoRedis Datadog Redis Options +type tfoRedis struct { + CommandStatsEnabled types.Bool `tfsdk:"command_stats_enabled"` +} + +// dtoRedis request/response object +type dtoRedis struct { + CommandStatsEnabled *bool `groups:"create,update" json:"command_stats_enabled,omitempty"` +} + +// expandRedis expands tf object into dto object +func expandRedis(ctx context.Context, diags *diag.Diagnostics, o *tfoRedis) *dtoRedis { + return &dtoRedis{CommandStatsEnabled: schemautil.ValueBoolPointer(o.CommandStatsEnabled)} +} + +// flattenRedis flattens dto object into tf object +func flattenRedis(ctx context.Context, diags *diag.Diagnostics, o *dtoRedis) *tfoRedis { + return &tfoRedis{CommandStatsEnabled: types.BoolPointerValue(o.CommandStatsEnabled)} +} + +var redisAttrs = map[string]attr.Type{"command_stats_enabled": types.BoolType} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go new file mode 100644 index 000000000..7dc27e200 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go @@ -0,0 +1,132 @@ +// Code generated by user config generator. DO NOT EDIT. 
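The generated `Expand`/`Flatten` pair above is the package's only exported surface: `Expand` lowers the Terraform list block into the request DTO, and `Flatten` rebuilds the list from the API response map. A minimal usage sketch of the create path follows; `buildCreateBody` and its plumbing are illustrative rather than the provider's actual resource code, and since these are `internal` packages such a caller would have to live inside this module:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog"
	"github.com/aiven/terraform-provider-aiven/internal/schemautil"
)

// buildCreateBody shows the intended call order only; the provider's real
// resource code differs.
func buildCreateBody(ctx context.Context, planned types.List) (map[string]any, diag.Diagnostics) {
	var diags diag.Diagnostics
	dto := datadog.Expand(ctx, &diags, planned) // tf list block -> request DTO
	if diags.HasError() {
		return nil, diags
	}
	req, err := schemautil.MarshalCreateUserConfig(dto) // keep create-group fields only
	if err != nil {
		diags.AddError("marshal datadog user config", err.Error())
		return nil, diags
	}
	return req, diags
}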
+ +package datadog + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "datadog_dbm_enabled": true, + "datadog_tags": [ + { + "comment": "foo", + "tag": "foo" + } + ], + "exclude_consumer_groups": [ + "foo" + ], + "exclude_topics": [ + "foo" + ], + "include_consumer_groups": [ + "foo" + ], + "include_topics": [ + "foo" + ], + "kafka_custom_metrics": [ + "foo" + ], + "max_jmx_metrics": 1, + "opensearch": { + "index_stats_enabled": true, + "pending_task_stats_enabled": true, + "pshard_stats_enabled": true + }, + "redis": { + "command_stats_enabled": true + } +}` +const updateOnlyFields = `{ + "datadog_dbm_enabled": true, + "datadog_tags": [ + { + "comment": "foo", + "tag": "foo" + } + ], + "exclude_consumer_groups": [ + "foo" + ], + "exclude_topics": [ + "foo" + ], + "include_consumer_groups": [ + "foo" + ], + "include_topics": [ + "foo" + ], + "kafka_custom_metrics": [ + "foo" + ], + "max_jmx_metrics": 1, + "opensearch": { + "index_stats_enabled": true, + "pending_task_stats_enabled": true, + "pshard_stats_enabled": true + }, + "redis": { + "command_stats_enabled": true + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go new file mode 100644 index 000000000..bc0a75e75 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go @@ -0,0 +1,224 @@ +// Code generated by user config generator. DO NOT EDIT. 
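The roundtrip test above leans on the `groups:"create,update"` struct tags: `schemautil.MarshalCreateUserConfig` and `schemautil.MarshalUpdateUserConfig` keep only the fields whose tag names that operation, which is why `updateOnlyFields` can be a subset of `allFields` once a config has create-only fields. A sketch of that filtering idea, assuming a reflection-based approach; `filterByGroup` is hypothetical, not the schemautil implementation:

package example

import (
	"reflect"
	"strings"
)

// filterByGroup keeps the json-tagged fields of a DTO whose `groups` tag
// lists op ("create" or "update"). Hypothetical sketch only; the real
// schemautil marshalers may differ (e.g. omitempty handling).
func filterByGroup(dto any, op string) map[string]any {
	out := map[string]any{}
	rv := reflect.Indirect(reflect.ValueOf(dto))
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		name, _, _ := strings.Cut(f.Tag.Get("json"), ",")
		if name == "" {
			continue
		}
		for _, g := range strings.Split(f.Tag.Get("groups"), ",") {
			if g == op {
				out[name] = rv.Field(i).Interface()
				break
			}
		}
	}
	return out
}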
+ +package externalawscloudwatchmetrics + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "External AWS CloudWatch Metrics integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{ + "dropped_metrics": resource.ListNestedBlock{ + Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "field": resource.StringAttribute{ + Description: "Identifier of a value in the metric.", + Required: true, + }, + "metric": resource.StringAttribute{ + Description: "Identifier of the metric.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "extra_metrics": resource.ListNestedBlock{ + Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "field": resource.StringAttribute{ + Description: "Identifier of a value in the metric.", + Required: true, + }, + "metric": resource.StringAttribute{ + Description: "Identifier of the metric.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "External AWS CloudWatch Metrics integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{ + "dropped_metrics": datasource.ListNestedBlock{ + Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "field": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of a value in the metric.", + }, + "metric": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of the metric.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "extra_metrics": datasource.ListNestedBlock{ + Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "field": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of a value in the metric.", + }, + "metric": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of the metric.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig External AWS CloudWatch 
Metrics integration user config +type tfoUserConfig struct { + DroppedMetrics types.List `tfsdk:"dropped_metrics"` + ExtraMetrics types.List `tfsdk:"extra_metrics"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + DroppedMetrics []*dtoDroppedMetrics `groups:"create,update" json:"dropped_metrics,omitempty"` + ExtraMetrics []*dtoExtraMetrics `groups:"create,update" json:"extra_metrics,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + droppedMetricsVar := schemautil.ExpandListNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) + if diags.HasError() { + return nil + } + extraMetricsVar := schemautil.ExpandListNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + DroppedMetrics: droppedMetricsVar, + ExtraMetrics: extraMetricsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + droppedMetricsVar := schemautil.FlattenListNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) + if diags.HasError() { + return nil + } + extraMetricsVar := schemautil.FlattenListNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + DroppedMetrics: droppedMetricsVar, + ExtraMetrics: extraMetricsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "dropped_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, + "extra_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, +} + +// tfoDroppedMetrics Metric name and subfield +type tfoDroppedMetrics struct { + Field types.String `tfsdk:"field"` + Metric types.String `tfsdk:"metric"` +} + +// dtoDroppedMetrics request/response object +type dtoDroppedMetrics struct { + Field string `groups:"create,update" json:"field"` + Metric string `groups:"create,update" json:"metric"` +} + +// expandDroppedMetrics expands tf object into dto object +func expandDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoDroppedMetrics) *dtoDroppedMetrics { + return &dtoDroppedMetrics{ + Field: o.Field.ValueString(), + Metric: o.Metric.ValueString(), + } +} + +// flattenDroppedMetrics flattens dto object into tf object +func flattenDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoDroppedMetrics) *tfoDroppedMetrics { + return &tfoDroppedMetrics{ + Field: types.StringValue(o.Field), + Metric: types.StringValue(o.Metric), + } +} + +var droppedMetricsAttrs = map[string]attr.Type{ + "field": types.StringType, + "metric": types.StringType, +} + +// tfoExtraMetrics Metric name and subfield +type tfoExtraMetrics struct { + Field types.String `tfsdk:"field"` + Metric types.String `tfsdk:"metric"` +} + +// dtoExtraMetrics request/response object +type dtoExtraMetrics struct { + Field string `groups:"create,update" json:"field"` + Metric string `groups:"create,update" json:"metric"` +} + +// expandExtraMetrics expands tf object into dto object +func expandExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoExtraMetrics) *dtoExtraMetrics { + return &dtoExtraMetrics{ + Field: 
o.Field.ValueString(), + Metric: o.Metric.ValueString(), + } +} + +// flattenExtraMetrics flattens dto object into tf object +func flattenExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoExtraMetrics) *tfoExtraMetrics { + return &tfoExtraMetrics{ + Field: types.StringValue(o.Field), + Metric: types.StringValue(o.Metric), + } +} + +var extraMetricsAttrs = map[string]attr.Type{ + "field": types.StringType, + "metric": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go new file mode 100644 index 000000000..9cf794599 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go @@ -0,0 +1,94 @@ +// Code generated by user config generator. DO NOT EDIT. + +package externalawscloudwatchmetrics + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "dropped_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ], + "extra_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ] +}` +const updateOnlyFields = `{ + "dropped_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ], + "extra_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, 
err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go new file mode 100644 index 000000000..7593af6cf --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go @@ -0,0 +1,168 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.ListNestedBlock{ + Description: "Kafka Connect service configuration values", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "config_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id.", + Optional: true, + }, + "group_id": resource.StringAttribute{ + Computed: true, + Description: "A unique string that identifies the Connect cluster group this worker belongs to.", + Optional: true, + }, + "offset_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id.", + Optional: true, + }, + "status_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id.", + Optional: true, + }, + }}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.ListNestedBlock{ + Description: "Kafka Connect service configuration values", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "config_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id.", + }, + "group_id": datasource.StringAttribute{ + Computed: true, + Description: "A unique string that identifies the Connect cluster group this worker belongs to.", + }, + "offset_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id.", + }, + "status_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id.", + }, + }}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + KafkaConnect types.List `tfsdk:"kafka_connect"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + KafkaConnect *dtoKafkaConnect `groups:"create,update" json:"kafka_connect,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + kafkaConnectVar := schemautil.ExpandListBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) + if diags.HasError() { + return nil + } + return &dtoUserConfig{KafkaConnect: kafkaConnectVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + kafkaConnectVar := schemautil.FlattenListBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) + if diags.HasError() { + return nil + } + return &tfoUserConfig{KafkaConnect: kafkaConnectVar} +} + +var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} + +// tfoKafkaConnect Kafka Connect service configuration values +type tfoKafkaConnect struct { + ConfigStorageTopic types.String `tfsdk:"config_storage_topic"` + GroupId types.String `tfsdk:"group_id"` + OffsetStorageTopic types.String `tfsdk:"offset_storage_topic"` + StatusStorageTopic types.String `tfsdk:"status_storage_topic"` +} + +// dtoKafkaConnect request/response object +type dtoKafkaConnect struct { + ConfigStorageTopic *string `groups:"create,update" json:"config_storage_topic,omitempty"` + GroupId *string `groups:"create,update" json:"group_id,omitempty"` + OffsetStorageTopic *string `groups:"create,update" json:"offset_storage_topic,omitempty"` + StatusStorageTopic *string `groups:"create,update" json:"status_storage_topic,omitempty"` +} + +// expandKafkaConnect expands tf object into dto object +func expandKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaConnect) *dtoKafkaConnect { + return &dtoKafkaConnect{ + ConfigStorageTopic: schemautil.ValueStringPointer(o.ConfigStorageTopic), + GroupId: schemautil.ValueStringPointer(o.GroupId), + OffsetStorageTopic: schemautil.ValueStringPointer(o.OffsetStorageTopic), + StatusStorageTopic: schemautil.ValueStringPointer(o.StatusStorageTopic), + } +} + +// flattenKafkaConnect flattens dto object into tf object +func flattenKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaConnect) *tfoKafkaConnect { + return &tfoKafkaConnect{ + ConfigStorageTopic: types.StringPointerValue(o.ConfigStorageTopic), + GroupId: types.StringPointerValue(o.GroupId), + OffsetStorageTopic: types.StringPointerValue(o.OffsetStorageTopic), + StatusStorageTopic: types.StringPointerValue(o.StatusStorageTopic), + } +} + +var kafkaConnectAttrs = map[string]attr.Type{ + "config_storage_topic": types.StringType, + "group_id": types.StringType, + "offset_storage_topic":
types.StringType, + "status_storage_topic": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go new file mode 100644 index 000000000..964993b87 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go @@ -0,0 +1,82 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "kafka_connect": { + "config_storage_topic": "foo", + "group_id": "foo", + "offset_storage_topic": "foo", + "status_storage_topic": "foo" + } +}` +const updateOnlyFields = `{ + "kafka_connect": { + "config_storage_topic": "foo", + "group_id": "foo", + "offset_storage_topic": "foo", + "status_storage_topic": "foo" + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go new file mode 100644 index 000000000..b1c8cb565 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go @@ -0,0 +1,114 @@ +// Code generated by user config generator. DO NOT EDIT. 
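Blocks such as `kafka_connect` above are MaxItems-1 lists on the Terraform side but a single pointer on the DTO side; the generic `schemautil.ExpandListBlockNested`/`FlattenListBlockNested` helpers bridge the two shapes. A sketch of the expand direction under that assumption; `expandSingleBlock` is a hypothetical stand-in, not the real helper:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// expandSingleBlock unwraps a list holding at most one object and applies
// the element converter, returning nil for an absent block. Hypothetical
// stand-in for schemautil.ExpandListBlockNested.
func expandSingleBlock[T, D any](
	ctx context.Context,
	diags *diag.Diagnostics,
	expand func(context.Context, *diag.Diagnostics, *T) *D,
	list types.List,
) *D {
	if list.IsNull() || list.IsUnknown() || len(list.Elements()) == 0 {
		return nil
	}
	elems := make([]*T, 0, 1)
	diags.Append(list.ElementsAs(ctx, &elems, false)...)
	if diags.HasError() {
		return nil
	}
	return expand(ctx, diags, elems[0])
}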
+ +package kafkalogs + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "kafka_topic": resource.StringAttribute{ + Description: "Topic name.", + Required: true, + }, + "selected_log_fields": resource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "kafka_topic": datasource.StringAttribute{ + Computed: true, + Description: "Topic name.", + }, + "selected_log_fields": datasource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + KafkaTopic types.String `tfsdk:"kafka_topic"` + SelectedLogFields types.List `tfsdk:"selected_log_fields"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + KafkaTopic string `groups:"create,update" json:"kafka_topic"` + SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + KafkaTopic: o.KafkaTopic.ValueString(), + SelectedLogFields: selectedLogFieldsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + diags.Append(d...) 
+ if diags.HasError() { + return nil + } + return &tfoUserConfig{ + KafkaTopic: types.StringValue(o.KafkaTopic), + SelectedLogFields: selectedLogFieldsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "kafka_topic": types.StringType, + "selected_log_fields": types.ListType{ElemType: types.StringType}, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go new file mode 100644 index 000000000..03a1c9ecc --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go @@ -0,0 +1,78 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkalogs + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "kafka_topic": "foo", + "selected_log_fields": [ + "foo" + ] +}` +const updateOnlyFields = `{ + "kafka_topic": "foo", + "selected_log_fields": [ + "foo" + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go new file mode 100644 index 000000000..548514588 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go @@ -0,0 +1,220 @@ +// Code 
generated by user config generator. DO NOT EDIT. + +package kafkamirrormaker + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{"cluster_alias": resource.StringAttribute{ + Computed: true, + Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", + Optional: true, + }}, + Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.ListNestedBlock{ + Description: "Kafka MirrorMaker configuration values", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "consumer_fetch_min_bytes": resource.Int64Attribute{ + Computed: true, + Description: "The minimum amount of data the server should return for a fetch request.", + Optional: true, + }, + "producer_batch_size": resource.Int64Attribute{ + Computed: true, + Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", + Optional: true, + }, + "producer_buffer_memory": resource.Int64Attribute{ + Computed: true, + Description: "The amount of bytes producer can use for buffering data before publishing to broker.", + Optional: true, + }, + "producer_compression_type": resource.StringAttribute{ + Computed: true, + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + }, + "producer_linger_ms": resource.Int64Attribute{ + Computed: true, + Description: "The linger time (ms) for waiting new data to arrive for publishing.", + Optional: true, + }, + "producer_max_request_size": resource.Int64Attribute{ + Computed: true, + Description: "The maximum request size in bytes.", + Optional: true, + }, + }}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{"cluster_alias": datasource.StringAttribute{ + Computed: true, + Description: "The alias under which the Kafka cluster is known to MirrorMaker. 
Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", + }}, + Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.ListNestedBlock{ + Description: "Kafka MirrorMaker configuration values", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "consumer_fetch_min_bytes": datasource.Int64Attribute{ + Computed: true, + Description: "The minimum amount of data the server should return for a fetch request.", + }, + "producer_batch_size": datasource.Int64Attribute{ + Computed: true, + Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", + }, + "producer_buffer_memory": datasource.Int64Attribute{ + Computed: true, + Description: "The amount of bytes producer can use for buffering data before publishing to broker.", + }, + "producer_compression_type": datasource.StringAttribute{ + Computed: true, + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + }, + "producer_linger_ms": datasource.Int64Attribute{ + Computed: true, + Description: "The linger time (ms) for waiting new data to arrive for publishing.", + }, + "producer_max_request_size": datasource.Int64Attribute{ + Computed: true, + Description: "The maximum request size in bytes.", + }, + }}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + ClusterAlias types.String `tfsdk:"cluster_alias"` + KafkaMirrormaker types.List `tfsdk:"kafka_mirrormaker"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + ClusterAlias *string `groups:"create,update" json:"cluster_alias,omitempty"` + KafkaMirrormaker *dtoKafkaMirrormaker `groups:"create,update" json:"kafka_mirrormaker,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + kafkaMirrormakerVar := schemautil.ExpandListBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + ClusterAlias: schemautil.ValueStringPointer(o.ClusterAlias), + KafkaMirrormaker: kafkaMirrormakerVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + kafkaMirrormakerVar := schemautil.FlattenListBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + ClusterAlias: types.StringPointerValue(o.ClusterAlias), + KafkaMirrormaker: kafkaMirrormakerVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "cluster_alias": types.StringType, + "kafka_mirrormaker": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, +} + +// tfoKafkaMirrormaker Kafka MirrorMaker configuration values +type tfoKafkaMirrormaker struct { + ConsumerFetchMinBytes types.Int64 `tfsdk:"consumer_fetch_min_bytes"` + ProducerBatchSize types.Int64 `tfsdk:"producer_batch_size"` + ProducerBufferMemory types.Int64 `tfsdk:"producer_buffer_memory"` + ProducerCompressionType types.String 
`tfsdk:"producer_compression_type"` + ProducerLingerMs types.Int64 `tfsdk:"producer_linger_ms"` + ProducerMaxRequestSize types.Int64 `tfsdk:"producer_max_request_size"` +} + +// dtoKafkaMirrormaker request/response object +type dtoKafkaMirrormaker struct { + ConsumerFetchMinBytes *int64 `groups:"create,update" json:"consumer_fetch_min_bytes,omitempty"` + ProducerBatchSize *int64 `groups:"create,update" json:"producer_batch_size,omitempty"` + ProducerBufferMemory *int64 `groups:"create,update" json:"producer_buffer_memory,omitempty"` + ProducerCompressionType *string `groups:"create,update" json:"producer_compression_type,omitempty"` + ProducerLingerMs *int64 `groups:"create,update" json:"producer_linger_ms,omitempty"` + ProducerMaxRequestSize *int64 `groups:"create,update" json:"producer_max_request_size,omitempty"` +} + +// expandKafkaMirrormaker expands tf object into dto object +func expandKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaMirrormaker) *dtoKafkaMirrormaker { + return &dtoKafkaMirrormaker{ + ConsumerFetchMinBytes: schemautil.ValueInt64Pointer(o.ConsumerFetchMinBytes), + ProducerBatchSize: schemautil.ValueInt64Pointer(o.ProducerBatchSize), + ProducerBufferMemory: schemautil.ValueInt64Pointer(o.ProducerBufferMemory), + ProducerCompressionType: schemautil.ValueStringPointer(o.ProducerCompressionType), + ProducerLingerMs: schemautil.ValueInt64Pointer(o.ProducerLingerMs), + ProducerMaxRequestSize: schemautil.ValueInt64Pointer(o.ProducerMaxRequestSize), + } +} + +// flattenKafkaMirrormaker flattens dto object into tf object +func flattenKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaMirrormaker) *tfoKafkaMirrormaker { + return &tfoKafkaMirrormaker{ + ConsumerFetchMinBytes: types.Int64PointerValue(o.ConsumerFetchMinBytes), + ProducerBatchSize: types.Int64PointerValue(o.ProducerBatchSize), + ProducerBufferMemory: types.Int64PointerValue(o.ProducerBufferMemory), + ProducerCompressionType: types.StringPointerValue(o.ProducerCompressionType), + ProducerLingerMs: types.Int64PointerValue(o.ProducerLingerMs), + ProducerMaxRequestSize: types.Int64PointerValue(o.ProducerMaxRequestSize), + } +} + +var kafkaMirrormakerAttrs = map[string]attr.Type{ + "consumer_fetch_min_bytes": types.Int64Type, + "producer_batch_size": types.Int64Type, + "producer_buffer_memory": types.Int64Type, + "producer_compression_type": types.StringType, + "producer_linger_ms": types.Int64Type, + "producer_max_request_size": types.Int64Type, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go new file mode 100644 index 000000000..1e269a0d6 --- /dev/null +++ 
b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go @@ -0,0 +1,88 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkamirrormaker + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "cluster_alias": "foo", + "kafka_mirrormaker": { + "consumer_fetch_min_bytes": 1, + "producer_batch_size": 1, + "producer_buffer_memory": 1, + "producer_compression_type": "foo", + "producer_linger_ms": 1, + "producer_max_request_size": 1 + } +}` +const updateOnlyFields = `{ + "cluster_alias": "foo", + "kafka_mirrormaker": { + "consumer_fetch_min_bytes": 1, + "producer_batch_size": 1, + "producer_buffer_memory": 1, + "producer_compression_type": "foo", + "producer_linger_ms": 1, + "producer_max_request_size": 1 + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/logs/logs.go b/internal/plugin/service/userconfig/integration/logs/logs.go new file mode 100644 index 000000000..7a9be7d51 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/logs/logs.go @@ -0,0 +1,133 @@ +// Code generated by user config generator. DO NOT EDIT. 
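Each package's `Flatten` receives the user config as a `map[string]any` (the shape the API client hands back) and converts it with `schemautil.MapToDTO` before the generated flatteners run. A JSON re-encode is one plausible way to implement that hop; a sketch under that assumption, with `mapToDTO` as a hypothetical stand-in:

package example

import "encoding/json"

// mapToDTO re-encodes an API response map and decodes it into the generated
// DTO so the json tags drive field matching. Hypothetical stand-in for
// schemautil.MapToDTO, which may work differently.
func mapToDTO(src map[string]any, dst any) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, dst)
}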
+ +package logs + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "elasticsearch_index_days_max": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(3), + Description: "Elasticsearch index retention limit. The default value is `3`.", + Optional: true, + }, + "elasticsearch_index_prefix": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("logs"), + Description: "Elasticsearch index prefix. The default value is `logs`.", + Optional: true, + }, + "selected_log_fields": resource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "elasticsearch_index_days_max": datasource.Int64Attribute{ + Computed: true, + Description: "Elasticsearch index retention limit. The default value is `3`.", + }, + "elasticsearch_index_prefix": datasource.StringAttribute{ + Computed: true, + Description: "Elasticsearch index prefix. The default value is `logs`.", + }, + "selected_log_fields": datasource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + ElasticsearchIndexDaysMax types.Int64 `tfsdk:"elasticsearch_index_days_max"` + ElasticsearchIndexPrefix types.String `tfsdk:"elasticsearch_index_prefix"` + SelectedLogFields types.List `tfsdk:"selected_log_fields"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + ElasticsearchIndexDaysMax *int64 `groups:"create,update" json:"elasticsearch_index_days_max,omitempty"` + ElasticsearchIndexPrefix *string `groups:"create,update" json:"elasticsearch_index_prefix,omitempty"` + SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + ElasticsearchIndexDaysMax: schemautil.ValueInt64Pointer(o.ElasticsearchIndexDaysMax), + ElasticsearchIndexPrefix: schemautil.ValueStringPointer(o.ElasticsearchIndexPrefix), + SelectedLogFields: selectedLogFieldsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + diags.Append(d...) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + ElasticsearchIndexDaysMax: types.Int64PointerValue(o.ElasticsearchIndexDaysMax), + ElasticsearchIndexPrefix: types.StringPointerValue(o.ElasticsearchIndexPrefix), + SelectedLogFields: selectedLogFieldsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "elasticsearch_index_days_max": types.Int64Type, + "elasticsearch_index_prefix": types.StringType, + "selected_log_fields": types.ListType{ElemType: types.StringType}, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/logs/logs_test.go b/internal/plugin/service/userconfig/integration/logs/logs_test.go new file mode 100644 index 000000000..9635dcc31 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/logs/logs_test.go @@ -0,0 +1,80 @@ +// Code generated by user config generator. DO NOT EDIT. 
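For the read path, `Flatten` is the entry point; a short usage sketch for this package, complementing the generated roundtrip test below. The response payload is made up for illustration, and a real caller must live inside this module because the package is `internal`:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs"
)

// readUserConfig shows only the flatten direction; the payload below is
// illustrative, not a recorded API response.
func readUserConfig(ctx context.Context) (types.List, diag.Diagnostics) {
	var diags diag.Diagnostics
	resp := map[string]any{
		"elasticsearch_index_days_max": 3,
		"elasticsearch_index_prefix":   "logs",
		"selected_log_fields":          []any{"MESSAGE"},
	}
	return logs.Flatten(ctx, &diags, resp), diags // response map -> tf list block
}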
+ +package logs + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "elasticsearch_index_days_max": 1, + "elasticsearch_index_prefix": "foo", + "selected_log_fields": [ + "foo" + ] +}` +const updateOnlyFields = `{ + "elasticsearch_index_days_max": 1, + "elasticsearch_index_prefix": "foo", + "selected_log_fields": [ + "foo" + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics.go b/internal/plugin/service/userconfig/integration/metrics/metrics.go new file mode 100644 index 000000000..7670348fc --- /dev/null +++ b/internal/plugin/service/userconfig/integration/metrics/metrics.go @@ -0,0 +1,414 @@ +// Code generated by user config generator. DO NOT EDIT. + +package metrics + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{ + "database": resource.StringAttribute{ + Computed: true, + Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + "retention_days": resource.Int64Attribute{ + Computed: true, + Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. 
Set to 0 for no automatic cleanup. Defaults to 30 days.", + Optional: true, + }, + "ro_username": resource.StringAttribute{ + Computed: true, + Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + "username": resource.StringAttribute{ + Computed: true, + Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + }, + Blocks: map[string]resource.Block{"source_mysql": resource.ListNestedBlock{ + Description: "Configuration options for metrics where source service is MySQL", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.ListNestedBlock{ + Description: "Configuration options for Telegraf MySQL input plugin", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "gather_event_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", + Optional: true, + }, + "gather_file_events_stats": resource.BoolAttribute{ + Computed: true, + Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", + Optional: true, + }, + "gather_index_io_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", + Optional: true, + }, + "gather_info_schema_auto_inc": resource.BoolAttribute{ + Computed: true, + Description: "Gather auto_increment columns and max values from information schema.", + Optional: true, + }, + "gather_innodb_metrics": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", + Optional: true, + }, + "gather_perf_events_statements": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", + Optional: true, + }, + "gather_process_list": resource.BoolAttribute{ + Computed: true, + Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", + Optional: true, + }, + "gather_slave_status": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from SHOW SLAVE STATUS command output.", + Optional: true, + }, + "gather_table_io_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", + Optional: true, + }, + "gather_table_lock_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", + Optional: true, + }, + "gather_table_schema": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", + Optional: true, + }, + "perf_events_statements_digest_text_limit": resource.Int64Attribute{ + Computed: true, + Description: "Truncates digest text from perf_events_statements into this many characters.", + Optional: true, + }, + "perf_events_statements_limit": resource.Int64Attribute{ + Computed: true, + Description: "Limits metrics from perf_events_statements.", + Optional: true, + }, 
+ "perf_events_statements_time_limit": resource.Int64Attribute{ + Computed: true, + Description: "Only include perf_events_statements whose last seen is less than this many seconds.", + Optional: true, + }, + }}, + }}}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{ + "database": datasource.StringAttribute{ + Computed: true, + Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + "retention_days": datasource.Int64Attribute{ + Computed: true, + Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.", + }, + "ro_username": datasource.StringAttribute{ + Computed: true, + Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + "username": datasource.StringAttribute{ + Computed: true, + Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + }, + Blocks: map[string]datasource.Block{"source_mysql": datasource.ListNestedBlock{ + Description: "Configuration options for metrics where source service is MySQL", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.ListNestedBlock{ + Description: "Configuration options for Telegraf MySQL input plugin", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "gather_event_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", + }, + "gather_file_events_stats": datasource.BoolAttribute{ + Computed: true, + Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", + }, + "gather_index_io_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", + }, + "gather_info_schema_auto_inc": datasource.BoolAttribute{ + Computed: true, + Description: "Gather auto_increment columns and max values from information schema.", + }, + "gather_innodb_metrics": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", + }, + "gather_perf_events_statements": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", + }, + "gather_process_list": datasource.BoolAttribute{ + Computed: true, + Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", + }, + "gather_slave_status": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from SHOW SLAVE STATUS command output.", 
+ }, + "gather_table_io_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", + }, + "gather_table_lock_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", + }, + "gather_table_schema": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", + }, + "perf_events_statements_digest_text_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Truncates digest text from perf_events_statements into this many characters.", + }, + "perf_events_statements_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Limits metrics from perf_events_statements.", + }, + "perf_events_statements_time_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Only include perf_events_statements whose last seen is less than this many seconds.", + }, + }}, + }}}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Database types.String `tfsdk:"database"` + RetentionDays types.Int64 `tfsdk:"retention_days"` + RoUsername types.String `tfsdk:"ro_username"` + SourceMysql types.List `tfsdk:"source_mysql"` + Username types.String `tfsdk:"username"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Database *string `groups:"create,update" json:"database,omitempty"` + RetentionDays *int64 `groups:"create,update" json:"retention_days,omitempty"` + RoUsername *string `groups:"create,update" json:"ro_username,omitempty"` + SourceMysql *dtoSourceMysql `groups:"create,update" json:"source_mysql,omitempty"` + Username *string `groups:"create,update" json:"username,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + sourceMysqlVar := schemautil.ExpandListBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + Database: schemautil.ValueStringPointer(o.Database), + RetentionDays: schemautil.ValueInt64Pointer(o.RetentionDays), + RoUsername: schemautil.ValueStringPointer(o.RoUsername), + SourceMysql: sourceMysqlVar, + Username: schemautil.ValueStringPointer(o.Username), + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + sourceMysqlVar := schemautil.FlattenListBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + Database: types.StringPointerValue(o.Database), + RetentionDays: types.Int64PointerValue(o.RetentionDays), + RoUsername: types.StringPointerValue(o.RoUsername), + SourceMysql: sourceMysqlVar, + Username: types.StringPointerValue(o.Username), + } +} + +var userConfigAttrs = map[string]attr.Type{ + "database": types.StringType, + "retention_days": types.Int64Type, + "ro_username": types.StringType, + "source_mysql": types.ListType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, + "username": types.StringType, +} + +// tfoSourceMysql Configuration options for metrics where source service is MySQL +type tfoSourceMysql struct { + Telegraf types.List `tfsdk:"telegraf"` +} + +// 
dtoSourceMysql request/response object +type dtoSourceMysql struct { + Telegraf *dtoTelegraf `groups:"create,update" json:"telegraf,omitempty"` +} + +// expandSourceMysql expands tf object into dto object +func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourceMysql) *dtoSourceMysql { + telegrafVar := schemautil.ExpandListBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) + if diags.HasError() { + return nil + } + return &dtoSourceMysql{Telegraf: telegrafVar} +} + +// flattenSourceMysql flattens dto object into tf object +func flattenSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *dtoSourceMysql) *tfoSourceMysql { + telegrafVar := schemautil.FlattenListBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf) + if diags.HasError() { + return nil + } + return &tfoSourceMysql{Telegraf: telegrafVar} +} + +var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.ListType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}} + +// tfoTelegraf Configuration options for Telegraf MySQL input plugin +type tfoTelegraf struct { + GatherEventWaits types.Bool `tfsdk:"gather_event_waits"` + GatherFileEventsStats types.Bool `tfsdk:"gather_file_events_stats"` + GatherIndexIoWaits types.Bool `tfsdk:"gather_index_io_waits"` + GatherInfoSchemaAutoInc types.Bool `tfsdk:"gather_info_schema_auto_inc"` + GatherInnodbMetrics types.Bool `tfsdk:"gather_innodb_metrics"` + GatherPerfEventsStatements types.Bool `tfsdk:"gather_perf_events_statements"` + GatherProcessList types.Bool `tfsdk:"gather_process_list"` + GatherSlaveStatus types.Bool `tfsdk:"gather_slave_status"` + GatherTableIoWaits types.Bool `tfsdk:"gather_table_io_waits"` + GatherTableLockWaits types.Bool `tfsdk:"gather_table_lock_waits"` + GatherTableSchema types.Bool `tfsdk:"gather_table_schema"` + PerfEventsStatementsDigestTextLimit types.Int64 `tfsdk:"perf_events_statements_digest_text_limit"` + PerfEventsStatementsLimit types.Int64 `tfsdk:"perf_events_statements_limit"` + PerfEventsStatementsTimeLimit types.Int64 `tfsdk:"perf_events_statements_time_limit"` +} + +// dtoTelegraf request/response object +type dtoTelegraf struct { + GatherEventWaits *bool `groups:"create,update" json:"gather_event_waits,omitempty"` + GatherFileEventsStats *bool `groups:"create,update" json:"gather_file_events_stats,omitempty"` + GatherIndexIoWaits *bool `groups:"create,update" json:"gather_index_io_waits,omitempty"` + GatherInfoSchemaAutoInc *bool `groups:"create,update" json:"gather_info_schema_auto_inc,omitempty"` + GatherInnodbMetrics *bool `groups:"create,update" json:"gather_innodb_metrics,omitempty"` + GatherPerfEventsStatements *bool `groups:"create,update" json:"gather_perf_events_statements,omitempty"` + GatherProcessList *bool `groups:"create,update" json:"gather_process_list,omitempty"` + GatherSlaveStatus *bool `groups:"create,update" json:"gather_slave_status,omitempty"` + GatherTableIoWaits *bool `groups:"create,update" json:"gather_table_io_waits,omitempty"` + GatherTableLockWaits *bool `groups:"create,update" json:"gather_table_lock_waits,omitempty"` + GatherTableSchema *bool `groups:"create,update" json:"gather_table_schema,omitempty"` + PerfEventsStatementsDigestTextLimit *int64 `groups:"create,update" json:"perf_events_statements_digest_text_limit,omitempty"` + PerfEventsStatementsLimit *int64 `groups:"create,update" json:"perf_events_statements_limit,omitempty"` + PerfEventsStatementsTimeLimit *int64 `groups:"create,update" 
json:"perf_events_statements_time_limit,omitempty"` +} + +// expandTelegraf expands tf object into dto object +func expandTelegraf(ctx context.Context, diags *diag.Diagnostics, o *tfoTelegraf) *dtoTelegraf { + return &dtoTelegraf{ + GatherEventWaits: schemautil.ValueBoolPointer(o.GatherEventWaits), + GatherFileEventsStats: schemautil.ValueBoolPointer(o.GatherFileEventsStats), + GatherIndexIoWaits: schemautil.ValueBoolPointer(o.GatherIndexIoWaits), + GatherInfoSchemaAutoInc: schemautil.ValueBoolPointer(o.GatherInfoSchemaAutoInc), + GatherInnodbMetrics: schemautil.ValueBoolPointer(o.GatherInnodbMetrics), + GatherPerfEventsStatements: schemautil.ValueBoolPointer(o.GatherPerfEventsStatements), + GatherProcessList: schemautil.ValueBoolPointer(o.GatherProcessList), + GatherSlaveStatus: schemautil.ValueBoolPointer(o.GatherSlaveStatus), + GatherTableIoWaits: schemautil.ValueBoolPointer(o.GatherTableIoWaits), + GatherTableLockWaits: schemautil.ValueBoolPointer(o.GatherTableLockWaits), + GatherTableSchema: schemautil.ValueBoolPointer(o.GatherTableSchema), + PerfEventsStatementsDigestTextLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsDigestTextLimit), + PerfEventsStatementsLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsLimit), + PerfEventsStatementsTimeLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsTimeLimit), + } +} + +// flattenTelegraf flattens dto object into tf object +func flattenTelegraf(ctx context.Context, diags *diag.Diagnostics, o *dtoTelegraf) *tfoTelegraf { + return &tfoTelegraf{ + GatherEventWaits: types.BoolPointerValue(o.GatherEventWaits), + GatherFileEventsStats: types.BoolPointerValue(o.GatherFileEventsStats), + GatherIndexIoWaits: types.BoolPointerValue(o.GatherIndexIoWaits), + GatherInfoSchemaAutoInc: types.BoolPointerValue(o.GatherInfoSchemaAutoInc), + GatherInnodbMetrics: types.BoolPointerValue(o.GatherInnodbMetrics), + GatherPerfEventsStatements: types.BoolPointerValue(o.GatherPerfEventsStatements), + GatherProcessList: types.BoolPointerValue(o.GatherProcessList), + GatherSlaveStatus: types.BoolPointerValue(o.GatherSlaveStatus), + GatherTableIoWaits: types.BoolPointerValue(o.GatherTableIoWaits), + GatherTableLockWaits: types.BoolPointerValue(o.GatherTableLockWaits), + GatherTableSchema: types.BoolPointerValue(o.GatherTableSchema), + PerfEventsStatementsDigestTextLimit: types.Int64PointerValue(o.PerfEventsStatementsDigestTextLimit), + PerfEventsStatementsLimit: types.Int64PointerValue(o.PerfEventsStatementsLimit), + PerfEventsStatementsTimeLimit: types.Int64PointerValue(o.PerfEventsStatementsTimeLimit), + } +} + +var telegrafAttrs = map[string]attr.Type{ + "gather_event_waits": types.BoolType, + "gather_file_events_stats": types.BoolType, + "gather_index_io_waits": types.BoolType, + "gather_info_schema_auto_inc": types.BoolType, + "gather_innodb_metrics": types.BoolType, + "gather_perf_events_statements": types.BoolType, + "gather_process_list": types.BoolType, + "gather_slave_status": types.BoolType, + "gather_table_io_waits": types.BoolType, + "gather_table_lock_waits": types.BoolType, + "gather_table_schema": types.BoolType, + "perf_events_statements_digest_text_limit": types.Int64Type, + "perf_events_statements_limit": types.Int64Type, + "perf_events_statements_time_limit": types.Int64Type, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, 
expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go new file mode 100644 index 000000000..07c3e6b2f --- /dev/null +++ b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go @@ -0,0 +1,114 @@ +// Code generated by user config generator. DO NOT EDIT. + +package metrics + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "database": "foo", + "retention_days": 1, + "ro_username": "foo", + "source_mysql": { + "telegraf": { + "gather_event_waits": true, + "gather_file_events_stats": true, + "gather_index_io_waits": true, + "gather_info_schema_auto_inc": true, + "gather_innodb_metrics": true, + "gather_perf_events_statements": true, + "gather_process_list": true, + "gather_slave_status": true, + "gather_table_io_waits": true, + "gather_table_lock_waits": true, + "gather_table_schema": true, + "perf_events_statements_digest_text_limit": 1, + "perf_events_statements_limit": 1, + "perf_events_statements_time_limit": 1 + } + }, + "username": "foo" +}` +const updateOnlyFields = `{ + "database": "foo", + "retention_days": 1, + "ro_username": "foo", + "source_mysql": { + "telegraf": { + "gather_event_waits": true, + "gather_file_events_stats": true, + "gather_index_io_waits": true, + "gather_info_schema_auto_inc": true, + "gather_innodb_metrics": true, + "gather_perf_events_statements": true, + "gather_process_list": true, + "gather_slave_status": true, + "gather_table_io_waits": true, + "gather_table_lock_waits": true, + "gather_table_schema": true, + "perf_events_statements_digest_text_limit": 1, + "perf_events_statements_limit": 1, + "perf_events_statements_time_limit": 1 + } + }, + "username": "foo" +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is 
strictly equal to the input
+			// If so, the flow is valid
+			b, err := json.MarshalIndent(dtoConfig, "", " ")
+			require.NoError(t, err)
+			require.Empty(t, cmp.Diff(opt.expect, string(b)))
+		})
+	}
+}
diff --git a/internal/plugin/util/schema.go b/internal/plugin/util/schema.go
index 4d41a64fb..388335f5a 100644
--- a/internal/plugin/util/schema.go
+++ b/internal/plugin/util/schema.go
@@ -10,14 +10,16 @@ import (
 // GeneralizeSchema is a function that generalizes the schema by adding the common definitions to the schema.
 func GeneralizeSchema(ctx context.Context, s schema.Schema) schema.Schema {
-	s.Blocks = map[string]schema.Block{
-		"timeouts": timeouts.Block(ctx, timeouts.Opts{
-			Create: true,
-			Read:   true,
-			Update: true,
-			Delete: true,
-		}),
+	if s.Blocks == nil {
+		s.Blocks = make(map[string]schema.Block)
 	}
+	s.Blocks["timeouts"] = timeouts.Block(ctx, timeouts.Opts{
+		Create: true,
+		Read:   true,
+		Update: true,
+		Delete: true,
+	})
+
 	return s
 }
diff --git a/internal/plugin/util/wait.go b/internal/plugin/util/wait.go
new file mode 100644
index 000000000..a4393d46c
--- /dev/null
+++ b/internal/plugin/util/wait.go
@@ -0,0 +1,20 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	"github.com/avast/retry-go"
+)
+
+// WaitActive waits until a resource reaches a desired state (becoming active, for example).
+// The overall timeout comes from the context, so there is no need to parse timeouts from the object.
+// Note that (attempts + connection timeout) * delay still amounts to a shorter timeout than the one we usually use (20 minutes or more).
+func WaitActive(ctx context.Context, retryableFunc retry.RetryableFunc) error {
+	return retry.Do(
+		retryableFunc,
+		retry.Context(ctx),
+		retry.Attempts(10),
+		retry.Delay(2*time.Second),
+	)
+}
diff --git a/internal/schemautil/plugin.go b/internal/schemautil/plugin.go
new file mode 100644
index 000000000..b06dd8635
--- /dev/null
+++ b/internal/schemautil/plugin.go
@@ -0,0 +1,153 @@
+package schemautil
+
+import (
+	"context"
+	"encoding/json"
+	"reflect"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/liip/sheriff"
+)
+
+func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types.List) (items []T) {
+	if list.IsUnknown() || list.IsNull() {
+		return nil
+	}
+	diags.Append(list.ElementsAs(ctx, &items, false)...)
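+	// ElementsAs decodes each element of the list into T (using tfsdk struct tags
+	// for object elements); conversion problems are collected into diags.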
+ return items +} + +type Expander[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K + +func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) []*K { + expanded := ExpandList[T](ctx, diags, list) + if expanded == nil || diags.HasError() { + return nil + } + + items := make([]*K, 0, len(expanded)) + for _, v := range expanded { + items = append(items, expand(ctx, diags, &v)) + if diags.HasError() { + return make([]*K, 0) + } + } + return items +} + +func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) *K { + items := ExpandListNested(ctx, diags, expand, list) + if len(items) == 0 { + return nil + } + return items[0] +} + +type Flattener[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K + +func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.List { + oType := types.ObjectType{AttrTypes: attrs} + empty := types.ListValueMust(oType, []attr.Value{}) + items := make([]*K, 0, len(list)) + for _, v := range list { + items = append(items, flatten(ctx, diags, v)) + if diags.HasError() { + return empty + } + } + + result, d := types.ListValueFrom(ctx, oType, items) + diags.Append(d...) + if diags.HasError() { + return empty + } + return result +} + +func FlattenListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.List { + if o == nil { + return types.ListValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) + } + return FlattenListNested(ctx, diags, flatten, attrs, []*T{o}) +} + +// marshalUserConfig converts user config into json +func marshalUserConfig(c any, groups ...string) (map[string]any, error) { + if c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) { + return nil, nil + } + + o := &sheriff.Options{ + Groups: groups, + } + + i, err := sheriff.Marshal(o, c) + if err != nil { + return nil, err + } + + m, ok := i.(map[string]any) + if !ok { + // It is an empty pointer + // sheriff just returned the very same object + return nil, nil + } + + return m, nil +} + +// MarshalCreateUserConfig returns marshaled user config for Create operation +func MarshalCreateUserConfig(c any) (map[string]any, error) { + return marshalUserConfig(c, "create", "update") +} + +// MarshalUpdateUserConfig returns marshaled user config for Update operation +func MarshalUpdateUserConfig(c any) (map[string]any, error) { + return marshalUserConfig(c, "update") +} + +func MapToDTO(src map[string]any, dst any) error { + b, err := json.Marshal(&src) + if err != nil { + return err + } + return json.Unmarshal(b, dst) +} + +// ValueStringPointer checks for "unknown" +// Returns nil instead of zero value +func ValueStringPointer(v types.String) *string { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueStringPointer() +} + +// ValueBoolPointer checks for "unknown" +// Returns nil instead of zero value +func ValueBoolPointer(v types.Bool) *bool { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueBoolPointer() +} + +// ValueInt64Pointer checks for "unknown" +// Returns nil instead of zero value +func ValueInt64Pointer(v types.Int64) *int64 { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueInt64Pointer() +} + +// ValueFloat64Pointer checks for "unknown" +// Returns nil instead of zero 
value +func ValueFloat64Pointer(v types.Float64) *float64 { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueFloat64Pointer() +} diff --git a/internal/schemautil/userconfig/dist/integration_endpoint_types.go b/internal/schemautil/userconfig/dist/integration_endpoint_types.go index 55989ada9..4369fec7f 100644 --- a/internal/schemautil/userconfig/dist/integration_endpoint_types.go +++ b/internal/schemautil/userconfig/dist/integration_endpoint_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationEndpointTypeDatadog is a generated function returning the schema of the datadog IntegrationEndpointType. diff --git a/internal/schemautil/userconfig/dist/integration_types.go b/internal/schemautil/userconfig/dist/integration_types.go index 66b10740f..de191b448 100644 --- a/internal/schemautil/userconfig/dist/integration_types.go +++ b/internal/schemautil/userconfig/dist/integration_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationTypeClickhouseKafka is a generated function returning the schema of the clickhouse_kafka IntegrationType. diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index 2945edc6e..95cb0d759 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // ServiceTypeCassandra is a generated function returning the schema of the cassandra ServiceType. 
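
For context, here is a minimal sketch of how the generated user config packages and the new schemautil helpers compose in a resource's Create path. The wrapper function, its package, and the error handling below are illustrative assumptions, not code from this patch; only metrics.Expand, metrics.Flatten, and the Marshal* helpers come from the files above.

package example // hypothetical consumer package

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
	"github.com/aiven/terraform-provider-aiven/internal/schemautil"
)

// userConfigForCreate is a hypothetical helper: it expands the planned
// user_config list into the generated DTO and marshals only the fields
// allowed on creation (sheriff groups "create"/"update").
func userConfigForCreate(ctx context.Context, diags *diag.Diagnostics, plan types.List) map[string]any {
	dto := metrics.Expand(ctx, diags, plan) // TF plan -> DTO; nil when the block is absent
	if diags.HasError() || dto == nil {
		return nil
	}
	m, err := schemautil.MarshalCreateUserConfig(dto)
	if err != nil {
		diags.AddError("failed to marshal user config", err.Error())
		return nil
	}
	return m
}

On Read the flow reverses: metrics.Flatten(ctx, diags, response) converts the API response map back into the types.List stored in state, which is the round trip the generated tests verify.
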
diff --git a/internal/sdkprovider/provider/provider.go b/internal/sdkprovider/provider/provider.go index 1628932b3..1dd0006a4 100644 --- a/internal/sdkprovider/provider/provider.go +++ b/internal/sdkprovider/provider/provider.go @@ -104,7 +104,6 @@ func Provider(version string) *schema.Provider { "aiven_transit_gateway_vpc_attachment": vpc.DatasourceTransitGatewayVPCAttachment(), // service integrations - "aiven_service_integration": serviceintegration.DatasourceServiceIntegration(), "aiven_service_integration_endpoint": serviceintegration.DatasourceServiceIntegrationEndpoint(), // m3db @@ -203,7 +202,6 @@ func Provider(version string) *schema.Provider { "aiven_transit_gateway_vpc_attachment": vpc.ResourceTransitGatewayVPCAttachment(), // service integrations - "aiven_service_integration": serviceintegration.ResourceServiceIntegration(), "aiven_service_integration_endpoint": serviceintegration.ResourceServiceIntegrationEndpoint(), // m3db diff --git a/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go b/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go index 932759eb0..eed3c903d 100644 --- a/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go +++ b/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go @@ -62,8 +62,6 @@ func (t *kafkaTopicCache) LoadByTopicName(projectName, serviceName, topicName st result.State = "CONFIGURING" } - log.Printf("[TRACE] retrieving from a topic cache `%+#v` for a topic name `%s`", result, topicName) - return result, ok } diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_test.go b/internal/sdkprovider/service/serviceintegration/service_integration_test.go index 8bcfa5a9a..b7966fa94 100644 --- a/internal/sdkprovider/service/serviceintegration/service_integration_test.go +++ b/internal/sdkprovider/service/serviceintegration/service_integration_test.go @@ -25,7 +25,7 @@ func TestAccAivenServiceIntegration_should_fail(t *testing.T) { { Config: testAccServiceIntegrationShouldFailResource(), PlanOnly: true, - ExpectError: regexp.MustCompile("endpoint id should have the following format: project_name/endpoint_id"), + ExpectError: regexp.MustCompile("endpoint id should have the following"), }, }, }) diff --git a/main.go b/main.go index d82346f09..3d013145e 100644 --- a/main.go +++ b/main.go @@ -12,6 +12,7 @@ import ( ) //go:generate go test -tags userconfig ./internal/schemautil/userconfig +//go:generate go run ./ucgenerator/... --integrations clickhouse_kafka,clickhouse_postgresql,datadog,external_aws_cloudwatch_metrics,kafka_connect,kafka_logs,kafka_mirrormaker,logs,metrics // version is the version of the provider. 
var version = "dev" diff --git a/ucgenerator/main.go b/ucgenerator/main.go new file mode 100644 index 000000000..4cb3aa912 --- /dev/null +++ b/ucgenerator/main.go @@ -0,0 +1,597 @@ +package main + +import ( + "flag" + "fmt" + "go/format" + "log" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/aiven/go-api-schemas/pkg/dist" + "github.com/dave/jennifer/jen" + "golang.org/x/exp/slices" + "golang.org/x/tools/imports" + "gopkg.in/yaml.v3" +) + +const ( + destPath = "./internal/plugin/service/userconfig/" + localPrefix = "github.com/aiven/terraform-provider-aiven" + importDiag = "github.com/hashicorp/terraform-plugin-framework/diag" + importTypes = "github.com/hashicorp/terraform-plugin-framework/types" + importAttr = "github.com/hashicorp/terraform-plugin-framework/attr" + importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" + importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema" + importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + importListvalidator = "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator" + codeGenerated = "Code generated by user config generator. DO NOT EDIT." +) + +func main() { + var serviceList, integrationList string + flag.StringVar(&serviceList, "services", "", "Comma separated service list of names to generate for") + flag.StringVar(&integrationList, "integrations", "", "Comma separated integrations list of names to generate for") + flag.Parse() + + if serviceList+integrationList == "" { + log.Fatal("--service or --integrations must be provided") + } + + if serviceList != "" { + err := generate("service", dist.ServiceTypes, strings.Split(serviceList, ",")) + if err != nil { + log.Fatal(err) + } + } + + if integrationList != "" { + err := generate("integration", dist.IntegrationTypes, strings.Split(integrationList, ",")) + if err != nil { + log.Fatal(err) + } + } +} + +func generate(kind string, data []byte, keys []string) error { + var root map[string]*object + + err := yaml.Unmarshal(data, &root) + if err != nil { + return err + } + + for key, o := range root { + if !slices.Contains(keys, key) { + continue + } + + pkgName := strings.ReplaceAll(key, "_", "") + o.isRoot = true + o.init("UserConfig") + + // Generates file + f := jen.NewFile(pkgName) + f.HeaderComment(codeGenerated) + f.ImportAlias(importResourceSchema, "resource") + f.ImportAlias(importDatasourceSchema, "datasource") + genAllForObject(f, o) + + // Sorts imports + imports.LocalPrefix = localPrefix + b, err := imports.Process("", []byte(f.GoString()), nil) + if err != nil { + return err + } + + // Saves file + dirPath := filepath.Join(destPath, kind, pkgName) + err = os.MkdirAll(dirPath, os.ModePerm) + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(dirPath, key+".go"), b, 0644) + if err != nil { + return err + } + + testFile, err := genTestFile(pkgName, o) + if err != nil { + return err + } + + testFileByte, err := format.Source([]byte(testFile)) + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(dirPath, key+"_test.go"), testFileByte, 0644) + if err != nil { + return err + } + } + return nil +} + +func genAllForObject(f *jen.File, o *object) { + genSchema(f, o, "Resource", importResourceSchema) + genSchema(f, o, "DataSource", importDatasourceSchema) + genTFObject(f, o) + genDTOObject(f, o) + genExpander(f, o) + genFlattener(f, o) + 
genAttrsMap(f, o) + + for _, p := range o.properties { + if p.isNestedBlock() { + if p.Type == objectTypeArray { + genAllForObject(f, p.ArrayItems) + } else { + genAllForObject(f, p) + } + } + } + + if !o.isRoot { + return + } + + // Exports handy public functions for root object only + f.Op(` +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} +`) +} + +// genExpander creates function that unwraps TF object into json +func genExpander(f *jen.File, o *object) { + body := make([]jen.Code, 0) + props := jen.Dict{} + for _, p := range o.properties { + var value *jen.Statement + switch p.Type { + case objectTypeObject: + value = jen.Op(p.varName) + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("expand"+p.camelName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + case objectTypeArray: + value = jen.Op(p.varName) + if p.ArrayItems.Type == objectTypeObject { + // It is a list of objects + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("expand"+p.camelName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } else { + // It is a list of scalars + // We don't want pointer scalars here + t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "") + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandList").Types(jen.Id(t)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } + default: + if p.Required { + value = jen.Id("o").Dot(p.camelName).Dot(getTFTypeToValue(p)).Call() + } else { + // Own functions for casting values + value = jen.Qual(importSchemautil, getTFTypeToValue(p)).Call(jen.Id("o").Dot(p.camelName)) + } + } + + props[jen.Id(p.camelName)] = value + } + + // Function body + return statement + body = append( + body, + jen.Return(jen.Id("&"+o.dtoStructName).Values(props)), + ) + + funcName := "expand" + o.camelName + f.Comment(funcName + " expands tf object into dto object") + f.Func().Id(funcName).Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), + jen.Id("o").Op("*"+o.tfoStructName), + ).Id("*" + o.dtoStructName).Block(body...) 
+} + +// genFlattener creates function that unwraps json into TF object +func genFlattener(f *jen.File, o *object) { + body := make([]jen.Code, 0) + props := jen.Dict{} + for _, p := range o.properties { + var value *jen.Statement + switch p.Type { + case objectTypeObject: + value = jen.Op(p.varName) + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("flatten"+p.camelName), + jen.Id(p.attrsName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + case objectTypeArray: + value = jen.Op(p.varName) + if p.ArrayItems.Type == objectTypeObject { + // It is a list of objects + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("flatten"+p.camelName), + jen.Id(p.attrsName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } else { + //It is a list of scalars + v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "ListValueFrom").Call( + jen.Id("ctx"), + jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"), + jen.Id("o").Dot(p.camelName), + ) + body = append( + body, + v, + jen.Id("diags").Dot("Append").Call(jen.Id("d").Op("...")), + ifErr(), + ) + } + default: + value = jen.Qual(importTypes, getTFTypeFromValue(p)).Call(jen.Id("o").Dot(p.camelName)) + } + + if value == nil { + continue + } + + props[jen.Id(p.camelName)] = value + } + + // Function body + return statement + body = append( + body, + jen.Return(jen.Id("&"+o.tfoStructName).Values(props)), + ) + + funcName := "flatten" + o.camelName + f.Comment(funcName + " flattens dto object into tf object") + f.Func().Id(funcName).Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), + jen.Id("o").Op("*"+o.dtoStructName), + ).Id("*" + o.tfoStructName).Block(body...) +} + +// genAttrsMap creates attributes map for Flatten functions to "unwrap" response json into TF object +func genAttrsMap(f *jen.File, o *object) { + values := jen.Dict{} + for _, p := range o.properties { + key := jen.Lit(p.tfName) + switch p.Type { + case objectTypeArray, objectTypeObject: + var v jen.Code + if p.isNestedBlock() { + v = jen.Qual(importTypes, "ObjectType").Values(jen.Dict{ + jen.Id("AttrTypes"): jen.Id(p.attrsName), + }) + } else { + v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") + } + values[key] = jen.Qual(importTypes, "ListType").Values(jen.Dict{jen.Id("ElemType"): v}) + default: + values[key] = jen.Qual(importTypes, getTFType(p)+"Type") + } + } + f.Var().Id(o.attrsName).Op("=").Map(jen.String()).Qual(importAttr, "Type").Values(values) +} + +// genTFObject creates TF object (for plan) +func genTFObject(f *jen.File, o *object) { + fields := make([]jen.Code, 0) + for _, p := range o.properties { + fields = append(fields, jen.Id(p.camelName).Qual(importTypes, getTFType(p)).Tag(map[string]string{"tfsdk": p.tfName})) + } + f.Comment(fmt.Sprintf("%s %s", o.tfoStructName, getDescription(o))) + f.Type().Id(o.tfoStructName).Struct(fields...) 
+}
+
+// genDTOObject creates DTO object to send over HTTP
+func genDTOObject(f *jen.File, o *object) {
+	fields := make([]jen.Code, 0)
+	for _, p := range o.properties {
+		tags := map[string]string{"json": p.jsonName, "groups": "create"}
+		if !p.Required {
+			tags["json"] += ",omitempty"
+		}
+		if !p.CreateOnly {
+			tags["groups"] += ",update"
+		}
+		fields = append(fields, jen.Id(p.camelName).Id(getDTOType(p)).Tag(tags))
+	}
+	f.Comment(o.dtoStructName + " request/response object")
+	f.Type().Id(o.dtoStructName).Struct(fields...)
+}
+
+// genSchema generates the TF schema. For the root object only, e.g. RedisUserConfig
+func genSchema(f *jen.File, o *object, name, pkg string) {
+	if !o.isRoot {
+		return
+	}
+
+	funcName := fmt.Sprintf("New%sSchema", name)
+	f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name)))
+	f.Func().Id(funcName).Params().Qual(pkg, "ListNestedBlock").Block(
+		jen.Return(getSchemaAttributes(o, pkg)),
+	)
+}
+
+func getSchemaAttributes(o *object, pkg string) jen.Code {
+	isResource := pkg == importResourceSchema
+	blocks := jen.Dict{}
+	attribs := jen.Dict{}
+
+	// An array's properties are its items' properties
+	properties := o.properties
+	if o.Type == objectTypeArray {
+		properties = o.ArrayItems.properties
+	}
+
+	for _, p := range properties {
+		key := jen.Lit(p.tfName)
+		if p.isNestedBlock() {
+			blocks[key] = getSchemaAttributes(p, pkg)
+		} else {
+			// For scalars
+			var value *jen.Statement
+			switch p.Type {
+			case objectTypeObject:
+				// Schemaless map
+				panic("schemaless objects are not supported")
+			case objectTypeArray:
+				value = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type")
+			}
+
+			values := getSchemaAttributeValues(p, isResource)
+			values[jen.Id("ElementType")] = value
+			attribs[jen.Lit(p.tfName)] = jen.Qual(pkg, getTFType(p)+"Attribute").Values(values)
+		}
+	}
+
+	nested := jen.Dict{}
+	if len(blocks) > 0 {
+		nested[jen.Id("Blocks")] = jen.Map(jen.String()).Qual(pkg, "Block").Values(blocks)
+	}
+
+	if len(attribs) > 0 {
+		nested[jen.Id("Attributes")] = jen.Map(jen.String()).Qual(pkg, "Attribute").Values(attribs)
+	}
+
+	values := getSchemaAttributeValues(o, isResource)
+	values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested)
+	return jen.Qual(pkg, "ListNestedBlock").Values(values)
+}
+
+func getSchemaAttributeValues(o *object, isResource bool) jen.Dict {
+	a := jen.Dict{}
+
+	if d := getDescription(o); d != "" {
+		a[jen.Id("Description")] = jen.Lit(d)
+	}
+
+	if o.IsDeprecated {
+		a[jen.Id("DeprecationMessage")] = jen.Lit(fmt.Sprintf("%q is deprecated", o.tfName))
+	}
+
+	validators := make([]jen.Code, 0)
+	if o.MinItems != nil {
+		validators = append(validators, valSizeAtLeast(*o.MinItems))
+	}
+
+	if o.MaxItems != nil {
+		validators = append(validators, valSizeAtMost(*o.MaxItems))
+	}
+
+	if !o.isNestedBlock() {
+		if !isResource {
+			a[jen.Id("Computed")] = jen.True()
+		} else {
+			if o.Required {
+				a[jen.Id("Required")] = jen.True()
+			} else {
+				a[jen.Id("Computed")] = jen.True()
+				a[jen.Id("Optional")] = jen.True()
+
+				if o.Default != nil {
+					a[jen.Id("Default")] = getStaticDefault(o)
+				}
+			}
+		}
+	}
+
+	if len(validators) > 0 {
+		a[jen.Id("Validators")] = valValidatorList(validators...)
+ } + + return a +} + +// getTFType matches generator types into plugin types +func getTFType(o *object) string { + switch o.Type { + case objectTypeObject: + if o.isNestedBlock() { + return "List" + } + return "Map" + case objectTypeArray: + return "List" + case objectTypeString: + return "String" + case objectTypeBoolean: + return "Bool" + case objectTypeInteger: + return "Int64" + case objectTypeNumber: + return "Float64" + } + panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) +} + +func getTFTypeToValue(o *object) string { + v := getTFType(o) + if !o.Required { + return fmt.Sprintf("Value%sPointer", v) + } + return "Value" + v +} + +func getTFTypeFromValue(o *object) string { + v := getTFType(o) + if !o.Required { + return v + "PointerValue" + } + return v + "Value" +} + +func getDTOType(o *object) string { + optional := "*" + if o.Required { + optional = "" + } + + switch o.Type { + case objectTypeObject: + return "*" + o.dtoStructName + case objectTypeArray: + t := "[]" + getDTOType(o.ArrayItems) + if o.ArrayItems.Type == objectTypeObject { + return t + } + // We don't want pointer scalars in slice + return strings.ReplaceAll(t, "*", "") + case objectTypeString: + return optional + "string" + case objectTypeBoolean: + return optional + "bool" + case objectTypeInteger: + return optional + "int64" + case objectTypeNumber: + return optional + "float64" + } + panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) +} + +// getStaticDefault returns "default" value for given field +func getStaticDefault(o *object) *jen.Statement { + var v *jen.Statement + switch o.Type { + case objectTypeString: + v = jen.Lit(o.Default.(string)) + case objectTypeInteger: + d, err := strconv.Atoi(o.Default.(string)) + if err != nil { + return nil + } + v = jen.Lit(d) + case objectTypeNumber: + v = jen.Lit(o.Default.(float64)) + case objectTypeBoolean: + v = jen.Lit(o.Default.(bool)) + default: + return nil + } + d := getTFType(o) + i := fmt.Sprintf("%s/%sdefault", importResourceSchema, strings.ToLower(d)) + return jen.Qual(i, "Static"+d).Call(v) +} + +func getDescription(o *object) string { + desc := make([]string, 0) + d := o.Description + if len(d) < len(o.Title) { + d = o.Title + } + + if d != "" { + desc = append(desc, addDot(d)) + } + + if o.Default != nil && o.Type != objectTypeArray { + desc = append(desc, fmt.Sprintf("The default value is `%v`.", o.Default)) + } + + // Trims dot from description, so it doesn't look weird with link to nested schema + // Example: Databases to expose[dot] (see [below for nested schema]...) + if len(desc) == 1 && o.isNestedBlock() { + return strings.Trim(desc[0], ".") + } + + return strings.Join(desc, " ") +} + +func addDot(s string) string { + if s != "" { + switch s[len(s)-1:] { + case ".", "!", "?": + default: + s += "." + } + } + return s +} + +func getValidator(name string, v any) *jen.Statement { + return jen.Qual(importListvalidator, name).Call(jen.Lit(v)) +} + +func valSizeAtLeast(n int) *jen.Statement { + return getValidator("SizeAtLeast", n) +} + +func valSizeAtMost(n int) *jen.Statement { + return getValidator("SizeAtMost", n) +} + +func valValidatorList(c ...jen.Code) *jen.Statement { + return jen.Index().Qual(importValidator, "List").Values(c...) 
+} + +func ifErr() *jen.Statement { + return jen.If(jen.Id("diags").Dot("HasError").Call()).Block(jen.Return(jen.Nil())) +} + +func toPtr[T any](v T) *T { + return &v +} diff --git a/ucgenerator/models.go b/ucgenerator/models.go new file mode 100644 index 000000000..2b6942269 --- /dev/null +++ b/ucgenerator/models.go @@ -0,0 +1,142 @@ +package main + +import ( + "strings" + + "github.com/stoewer/go-strcase" + "golang.org/x/exp/slices" +) + +type objectType string + +const ( + objectTypeObject objectType = "object" + objectTypeArray objectType = "array" + objectTypeString objectType = "string" + objectTypeBoolean objectType = "boolean" + objectTypeInteger objectType = "integer" + objectTypeNumber objectType = "number" +) + +type object struct { + isRoot bool // top level object + jsonName string // original name from json spec + tfName string // terraform manifest field, unlike jsonName, can't store dot symbol + tfoStructName string + dtoStructName string + camelName string + varName string + attrsName string + properties []*object + parent *object + + Type objectType `yaml:"-"` + Required bool `yaml:"-"` + + IsDeprecated bool `yaml:"is_deprecated"` + Default any `yaml:"default"` + Enum []*struct { + Value string `yaml:"value"` + IsDeprecated bool `yaml:"is_deprecated"` + } `yaml:"enum"` + Pattern string `yaml:"pattern"` + MinItems *int `yaml:"min_items"` + MaxItems *int `yaml:"max_items"` + MinLength *int `yaml:"min_length"` + MaxLength *int `yaml:"max_length"` + Minimum *float64 `yaml:"minimum"` + Maximum *float64 `yaml:"maximum"` + OrigType any `yaml:"type"` + Format string `yaml:"format"` + Title string `yaml:"title"` + Description string `yaml:"description"` + Properties map[string]*object `yaml:"properties"` + ArrayItems *object `yaml:"items"` + RequiredFields []string `yaml:"required"` + CreateOnly bool `yaml:"create_only"` + Nullable bool `yaml:"-"` +} + +func (o *object) isNestedBlock() bool { + switch o.Type { + case objectTypeObject: + return len(o.Properties) > 0 + case objectTypeArray: + switch o.ArrayItems.Type { + case objectTypeObject, objectTypeArray: + return true + } + } + return false +} + +func (o *object) init(name string) { + o.jsonName = name + o.tfName = strings.ReplaceAll(name, ".", "__") + o.camelName = toCamelCase(name) + + low := toLowerFirst(o.camelName) + o.varName = low + "Var" + o.attrsName = low + "Attrs" + o.tfoStructName = "tfo" + o.camelName + o.dtoStructName = "dto" + o.camelName + + // Sorts properties, so they keep order on each generation + keys := make([]string, 0, len(o.Properties)) + for k := range o.Properties { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + o.properties = append(o.properties, o.Properties[k]) + } + + required := make(map[string]bool, len(o.RequiredFields)) + for _, k := range o.RequiredFields { + required[k] = true + } + + for _, k := range keys { + child := o.Properties[k] + child.parent = o + child.Required = required[k] + child.init(k) + } + + // Types can be list of strings, or a string + if v, ok := o.OrigType.(string); ok { + o.Type = objectType(v) + } else if v, ok := o.OrigType.([]interface{}); ok { + o.Type = objectType(v[0].(string)) + for _, t := range v { + switch s := t.(string); s { + case "null": + o.Nullable = true + default: + o.Type = objectType(s) + } + } + } + + if o.Type == objectTypeArray { + o.ArrayItems.parent = o + o.ArrayItems.init(name) + } + + // In terraform objects are lists of one item + // Root item and properties should have max constraint + if o.Type == 
objectTypeObject {
+		if o.isRoot || o.parent != nil && o.parent.Type == objectTypeObject {
+			o.MaxItems = toPtr(1)
+		}
+	}
+}
+
+// toCamelCase some fields have dots within; replacing them makes for cleaner camelCase
+func toCamelCase(s string) string {
+	return strcase.UpperCamelCase(strings.ReplaceAll(s, ".", "_"))
+}
+
+func toLowerFirst(s string) string {
+	return strings.ToLower(s[0:1]) + s[1:]
+}
diff --git a/ucgenerator/tests.go b/ucgenerator/tests.go
new file mode 100644
index 000000000..fe63ec908
--- /dev/null
+++ b/ucgenerator/tests.go
@@ -0,0 +1,151 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// genJSONSample generates sample JSON for a test
+// When allFields is false, it generates a smaller JSON document, which helps to test nil (missing) values
+func genJSONSample(b *strings.Builder, o *object, allFields bool) string {
+	switch o.Type {
+	case objectTypeObject:
+		b.WriteString("{")
+		for i, p := range o.properties {
+			// Either field required or all fields printed
+			if !(p.Required || allFields || !p.CreateOnly) {
+				continue
+			}
+
+			b.WriteString(fmt.Sprintf("%q:", p.jsonName))
+			genJSONSample(b, p, allFields)
+			if i+1 != len(o.properties) {
+				b.WriteString(",")
+			}
+		}
+		b.WriteString("}")
+	case objectTypeArray:
+		b.WriteString("[")
+		genJSONSample(b, o.ArrayItems, allFields)
+		b.WriteString("]")
+	case objectTypeString:
+		b.WriteString(`"foo"`)
+	case objectTypeBoolean:
+		b.WriteString("true")
+	case objectTypeInteger:
+		b.WriteString("1")
+	case objectTypeNumber:
+		b.WriteString("1")
+	}
+	return b.String()
+}
+
+func genTestFile(pkg string, o *object) (string, error) {
+	allFields, err := indentJSON(genJSONSample(new(strings.Builder), o, true))
+	if err != nil {
+		return "", err
+	}
+
+	updateOnlyFields, err := indentJSON(genJSONSample(new(strings.Builder), o, false))
+	if err != nil {
+		return "", err
+	}
+
+	file := fmt.Sprintf(
+		testFile,
+		codeGenerated,
+		pkg,
+		o.camelName,
+		fmt.Sprintf("`%s`", allFields),
+		fmt.Sprintf("`%s`", updateOnlyFields),
+	)
+
+	return strings.TrimSpace(file), nil
+}
+
+func indentJSON(s string) (string, error) {
+	s = strings.ReplaceAll(s, ",}", "}") // fixes trailing comma when not all fields are generated
+	m := make(map[string]any)
+	err := json.Unmarshal([]byte(s), &m)
+	if err != nil {
+		return "", err
+	}
+
+	b, err := json.MarshalIndent(m, "", " ")
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+const testFile = `
+// %[1]s
+
+package %[2]s
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/stretchr/testify/require"
+
+	"github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+const allFields = %[4]s
+const updateOnlyFields = %[5]s
+
+func Test%[3]s(t *testing.T) {
+	cases := []struct{
+		name    string
+		source  string
+		expect  string
+		marshal func (any) (map[string]any, error)
+	}{
+		{
+			name:    "fields to create resource",
+			source:  allFields,
+			expect:  allFields,
+			marshal: schemautil.MarshalCreateUserConfig,
+		},
+		{
+			name:    "only fields to update resource",
+			source:  allFields,
+			expect:  updateOnlyFields, // usually, fewer fields
+			marshal: schemautil.MarshalUpdateUserConfig,
+		},
+	}
+
+	ctx := context.Background()
+	diags := new(diag.Diagnostics)
+	for _, opt := range cases {
+		t.Run(opt.name, func(t *testing.T) {
+			dto := new(dto%[3]s)
+			err := json.Unmarshal([]byte(opt.source), dto)
+			require.NoError(t, err)
+
+			// From json to TF
+			tfo := flatten%[3]s(ctx, diags, dto)
+			require.Empty(t,
diags) + + // From TF to json + config := expand%[3]s(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} +` From 159b395d716ff004a48fe24cd587981f42d7813e Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Thu, 5 Oct 2023 13:24:13 +0300 Subject: [PATCH 02/27] ci: fix code generation (#1381) --- .github/workflows/userconfig-generate-schema.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/userconfig-generate-schema.yml b/.github/workflows/userconfig-generate-schema.yml index 3941a7394..fd52b9347 100644 --- a/.github/workflows/userconfig-generate-schema.yml +++ b/.github/workflows/userconfig-generate-schema.yml @@ -17,7 +17,10 @@ jobs: - uses: actions/setup-go@v4 with: go-version-file: go.mod - - run: make generate + - name: install goimports + run: go install golang.org/x/tools/cmd/goimports@latest + - name: generate + run: make generate - uses: stefanzweifel/git-auto-commit-action@v4 with: commit_message: "chore(userconfig): generate schema" From e6c430c20c99d0a61c75df55bdf31e0e209b6bfb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 05:24:14 -0700 Subject: [PATCH 03/27] build(deps): bump github.com/gruntwork-io/terratest from 0.44.0 to 0.45.0 (#1379) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fab4215a2..2251d0c11 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 github.com/google/go-cmp v0.5.9 - github.com/gruntwork-io/terratest v0.44.0 + github.com/gruntwork-io/terratest v0.45.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-framework v1.4.0 github.com/hashicorp/terraform-plugin-go v0.19.0 diff --git a/go.sum b/go.sum index 78c4d5e40..ae10b7305 100644 --- a/go.sum +++ b/go.sum @@ -378,8 +378,8 @@ github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6c github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/gruntwork-io/terratest v0.44.0 h1:3k7lglJFAtw77p2HnR5vaZBCBnlHmu3DuLjVyuCZXJ0= -github.com/gruntwork-io/terratest v0.44.0/go.mod h1:EAEuzSjvxAzQoJCEQ06bJPTmdC9HikzgvhmxnAYuExM= +github.com/gruntwork-io/terratest v0.45.0 h1:02VuyLRmqOO45TaTH4P4mc44S18er5Rn4CooTUY0uek= +github.com/gruntwork-io/terratest v0.45.0/go.mod h1:4TWB5SYgATxJFfg+RNpE0gwiUWxtfMLGOXo5gwcGgMs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From 9f28058ed4fe52f05e6ccd30491fed46209a52cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 08:11:13 +0000 Subject: [PATCH 04/27] build(deps): bump 
github.com/aiven/go-api-schemas from 1.27.1-0.20230823111901-1e0306909d02 to 1.35.0 (#1383) --- docs/data-sources/flink.md | 2 + docs/data-sources/grafana.md | 1 + docs/data-sources/kafka.md | 20 + docs/data-sources/opensearch.md | 43 + docs/resources/flink.md | 2 + docs/resources/grafana.md | 3 +- docs/resources/kafka.md | 20 + docs/resources/opensearch.md | 43 + go.mod | 2 +- go.sum | 4 +- .../userconfig/apiconvert/fromapi_test.go | 2 + .../userconfig/dist/service_types.go | 783 +++++++++++++++++- internal/schemautil/userconfig/util.go | 4 +- 13 files changed, 922 insertions(+), 7 deletions(-) diff --git a/docs/data-sources/flink.md b/docs/data-sources/flink.md index 39ede94ce..3cfb9e5d0 100644 --- a/docs/data-sources/flink.md +++ b/docs/data-sources/flink.md @@ -83,12 +83,14 @@ Read-Only: Read-Only: +- `additional_backup_regions` (List of String) - `flink_version` (String) - `ip_filter` (List of String) - `ip_filter_object` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--ip_filter_object)) - `ip_filter_string` (List of String) - `number_of_task_slots` (Number) - `privatelink_access` (List of Object) (see [below for nested schema](#nestedobjatt--flink_user_config--privatelink_access)) +- `static_ips` (Boolean) ### Nested Schema for `flink_user_config.ip_filter_object` diff --git a/docs/data-sources/grafana.md b/docs/data-sources/grafana.md index 75f8faf5c..6e41e05d5 100644 --- a/docs/data-sources/grafana.md +++ b/docs/data-sources/grafana.md @@ -119,6 +119,7 @@ Read-Only: - `service_to_fork_from` (String) - `smtp_server` (List of Object) (see [below for nested schema](#nestedobjatt--grafana_user_config--smtp_server)) - `static_ips` (Boolean) +- `unified_alerting_enabled` (Boolean) - `user_auto_assign_org` (Boolean) - `user_auto_assign_org_role` (String) - `viewers_can_edit` (Boolean) diff --git a/docs/data-sources/kafka.md b/docs/data-sources/kafka.md index d161e591a..0d56c4253 100644 --- a/docs/data-sources/kafka.md +++ b/docs/data-sources/kafka.md @@ -108,6 +108,7 @@ Read-Only: - `schema_registry` (Boolean) - `schema_registry_config` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--schema_registry_config)) - `static_ips` (Boolean) +- `tiered_storage` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--tiered_storage)) ### Nested Schema for `kafka_user_config.ip_filter_object` @@ -139,6 +140,8 @@ Read-Only: - `log_flush_interval_ms` (Number) - `log_index_interval_bytes` (Number) - `log_index_size_max_bytes` (Number) +- `log_local_retention_bytes` (Number) +- `log_local_retention_ms` (Number) - `log_message_downconversion_enable` (Boolean) - `log_message_timestamp_difference_max_ms` (Number) - `log_message_timestamp_type` (String) @@ -257,6 +260,23 @@ Read-Only: - `topic_name` (String) + +### Nested Schema for `kafka_user_config.tiered_storage` + +Read-Only: + +- `enabled` (Boolean) +- `local_cache` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_user_config--tiered_storage--local_cache)) + + +### Nested Schema for `kafka_user_config.tiered_storage.local_cache` + +Read-Only: + +- `size` (Number) + + + ### Nested Schema for `service_integrations` diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md index f9ec47d95..e6255d194 100644 --- a/docs/data-sources/opensearch.md +++ b/docs/data-sources/opensearch.md @@ -161,6 +161,7 @@ Read-Only: - `action_auto_create_index_enabled` (Boolean) - `action_destructive_requires_name` (Boolean) +- 
`auth_failure_listeners` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--auth_failure_listeners)) - `cluster_max_shards_per_node` (Number) - `cluster_routing_allocation_node_concurrent_recoveries` (Number) - `email_sender_name` (String) @@ -175,6 +176,12 @@ Read-Only: - `indices_query_bool_max_clause_count` (Number) - `indices_recovery_max_bytes_per_sec` (Number) - `indices_recovery_max_concurrent_file_chunks` (Number) +- `ism_enabled` (Boolean) +- `ism_history_enabled` (Boolean) +- `ism_history_max_age` (Number) +- `ism_history_max_docs` (Number) +- `ism_history_rollover_check_period` (Number) +- `ism_history_rollover_retention_period` (Number) - `override_main_response_version` (Boolean) - `reindex_remote_whitelist` (List of String) - `script_max_compilations_rate` (String) @@ -191,6 +198,42 @@ Read-Only: - `thread_pool_write_queue_size` (Number) - `thread_pool_write_size` (Number) + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners` + +Read-Only: + +- `internal_authentication_backend_limiting` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--auth_failure_listeners--internal_authentication_backend_limiting)) +- `ip_rate_limiting` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--auth_failure_listeners--ip_rate_limiting)) + + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners.internal_authentication_backend_limiting` + +Read-Only: + +- `allowed_tries` (Number) +- `authentication_backend` (String) +- `block_expiry_seconds` (Number) +- `max_blocked_clients` (Number) +- `max_tracked_clients` (Number) +- `time_window_seconds` (Number) +- `type` (String) + + + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners.ip_rate_limiting` + +Read-Only: + +- `allowed_tries` (Number) +- `block_expiry_seconds` (Number) +- `max_blocked_clients` (Number) +- `max_tracked_clients` (Number) +- `time_window_seconds` (Number) +- `type` (String) + + + ### Nested Schema for `opensearch_user_config.opensearch_dashboards` diff --git a/docs/resources/flink.md b/docs/resources/flink.md index 9dd5a629b..b0bf76415 100644 --- a/docs/resources/flink.md +++ b/docs/resources/flink.md @@ -81,12 +81,14 @@ Optional: Optional: +- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. - `flink_version` (String) Flink major version. - `ip_filter` (List of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `ip_filter_object` (Block List, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object)) - `ip_filter_string` (List of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'. - `number_of_task_slots` (Number) Task slots per node. For a 3 node plan, total number of task slots is 3x this value. - `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink. (see [below for nested schema](#nestedblock--flink_user_config--privatelink_access)) +- `static_ips` (Boolean) Use static public IP addresses.
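To make the two new Flink options concrete, here is a minimal, hypothetical `aiven_flink` configuration exercising them; the project, cloud, plan, and service names are illustrative placeholders, not values taken from this patch:

```hcl
resource "aiven_flink" "example" {
  project      = "my-project"          # placeholder project name
  cloud_name   = "google-europe-west1" # placeholder cloud/region
  plan         = "business-4"          # placeholder plan
  service_name = "example-flink"

  flink_user_config {
    number_of_task_slots = 10

    # Options added by this schema bump:
    static_ips                = true
    additional_backup_regions = ["google-europe-west2"]
  }
}
```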
### Nested Schema for `flink_user_config.ip_filter_object` diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md index 7c970bdbe..53a6a2d77 100644 --- a/docs/resources/grafana.md +++ b/docs/resources/grafana.md @@ -78,7 +78,7 @@ resource "aiven_grafana" "gr1" { Optional: - `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. -- `alerting_enabled` (Boolean) Enable or disable Grafana alerting functionality. +- `alerting_enabled` (Boolean) Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled. - `alerting_error_or_timeout` (String) Default error or timeout setting for new alerting rules. - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. - `alerting_nodata_or_nullvalues` (String) Default value for 'no data or null values' for new alerting rules. @@ -114,6 +114,7 @@ Optional: - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. - `smtp_server` (Block List, Max: 1) SMTP server settings. (see [below for nested schema](#nestedblock--grafana_user_config--smtp_server)) - `static_ips` (Boolean) Use static public IP addresses. +- `unified_alerting_enabled` (Boolean) Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details. - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false. - `user_auto_assign_org_role` (String) Set role for new signups. Defaults to Viewer. - `viewers_can_edit` (Boolean) Users with view-only permission can edit but not save dashboards. diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md index 94fc4ef95..5089332d5 100644 --- a/docs/resources/kafka.md +++ b/docs/resources/kafka.md @@ -107,6 +107,7 @@ Optional: - `schema_registry` (Boolean) Enable Schema-Registry service. The default value is `false`. - `schema_registry_config` (Block List, Max: 1) Schema Registry configuration. (see [below for nested schema](#nestedblock--kafka_user_config--schema_registry_config)) - `static_ips` (Boolean) Use static public IP addresses. +- `tiered_storage` (Block List, Max: 1) Tiered storage configuration. (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage)) ### Nested Schema for `kafka_user_config.ip_filter_object` @@ -141,6 +142,8 @@ Optional: - `log_flush_interval_ms` (Number) The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used. - `log_index_interval_bytes` (Number) The interval with which Kafka adds an entry to the offset index. - `log_index_size_max_bytes` (Number) The maximum size in bytes of the offset index. +- `log_local_retention_bytes` (Number) The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. +- `log_local_retention_ms` (Number) The number of milliseconds to keep the local log segments before it gets eligible for deletion. 
If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. - `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. . - `log_message_timestamp_difference_max_ms` (Number) The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. - `log_message_timestamp_type` (String) Define whether the timestamp in the message is message create time or log append time. @@ -259,6 +262,23 @@ Optional: - `topic_name` (String) The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to `_schemas`. + +### Nested Schema for `kafka_user_config.tiered_storage` + +Optional: + +- `enabled` (Boolean) Whether to enable the tiered storage functionality. +- `local_cache` (Block List, Max: 1) Local cache configuration. (see [below for nested schema](#nestedblock--kafka_user_config--tiered_storage--local_cache)) + + +### Nested Schema for `kafka_user_config.tiered_storage.local_cache` + +Optional: + +- `size` (Number) Local cache size in bytes. + + + ### Nested Schema for `service_integrations` diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md index e3a15be4f..df16cf534 100644 --- a/docs/resources/opensearch.md +++ b/docs/resources/opensearch.md @@ -170,6 +170,7 @@ Optional: - `action_auto_create_index_enabled` (Boolean) Explicitly allow or block automatic creation of indices. Defaults to true. - `action_destructive_requires_name` (Boolean) Require explicit index names when deleting. +- `auth_failure_listeners` (Block List, Max: 1) Opensearch Security Plugin Settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners)) - `cluster_max_shards_per_node` (Number) Controls the number of shards allowed in the cluster per data node. - `cluster_routing_allocation_node_concurrent_recoveries` (Number) How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2. - `email_sender_name` (String) This should be identical to the Sender name defined in Opensearch dashboards. @@ -184,6 +185,12 @@ Optional: - `indices_query_bool_max_clause_count` (Number) Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value. - `indices_recovery_max_bytes_per_sec` (Number) Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb. - `indices_recovery_max_concurrent_file_chunks` (Number) Number of file chunks sent in parallel for each recovery. Defaults to 2. +- `ism_enabled` (Boolean) Specifies whether ISM is enabled or not. The default value is `true`. +- `ism_history_enabled` (Boolean) Specifies whether audit history is enabled or not. 
The logs from ISM are automatically indexed to a logs document. The default value is `true`. +- `ism_history_max_age` (Number) The maximum age before rolling over the audit history index in hours. The default value is `24`. +- `ism_history_max_docs` (Number) The maximum number of documents before rolling over the audit history index. The default value is `2500000`. +- `ism_history_rollover_check_period` (Number) The time between rollover checks for the audit history index in hours. The default value is `8`. +- `ism_history_rollover_retention_period` (Number) How long audit history indices are kept in days. The default value is `30`. - `override_main_response_version` (Boolean) Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false. - `reindex_remote_whitelist` (List of String) Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart. - `script_max_compilations_rate` (String) Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context. @@ -200,6 +207,42 @@ Optional: - `thread_pool_write_queue_size` (Number) Size for the thread pool queue. See documentation for exact details. - `thread_pool_write_size` (Number) Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value. + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners` + +Optional: + +- `internal_authentication_backend_limiting` (Block List, Max: 1) . (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--internal_authentication_backend_limiting)) +- `ip_rate_limiting` (Block List, Max: 1) IP address rate limiting settings. (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners--ip_rate_limiting)) + + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners.internal_authentication_backend_limiting` + +Optional: + +- `allowed_tries` (Number) The number of login attempts allowed before login is blocked. +- `authentication_backend` (String) The internal backend. Enter `internal`. +- `block_expiry_seconds` (Number) The duration of time that login remains blocked after a failed login. +- `max_blocked_clients` (Number) The maximum number of blocked IP addresses. +- `max_tracked_clients` (Number) The maximum number of tracked IP addresses that have failed login. +- `time_window_seconds` (Number) The window of time in which the value for `allowed_tries` is enforced. +- `type` (String) The type of rate limiting. + + + +### Nested Schema for `opensearch_user_config.opensearch.auth_failure_listeners.ip_rate_limiting` + +Optional: + +- `allowed_tries` (Number) The number of login attempts allowed before login is blocked. +- `block_expiry_seconds` (Number) The duration of time that login remains blocked after a failed login. +- `max_blocked_clients` (Number) The maximum number of blocked IP addresses. +- `max_tracked_clients` (Number) The maximum number of tracked IP addresses that have failed login. +- `time_window_seconds` (Number) The window of time in which the value for `allowed_tries` is enforced. +- `type` (String) The type of rate limiting. 
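As a sketch of how these nested blocks compose, the following hypothetical `aiven_opensearch` resource wires up both rate limiters; the service coordinates are placeholders, and the `type` and `authentication_backend` values follow the usual OpenSearch Security plugin conventions (`ip`, `username`, `internal`) rather than anything asserted by this patch:

```hcl
resource "aiven_opensearch" "example" {
  project      = "my-project"          # placeholder
  cloud_name   = "google-europe-west1" # placeholder
  plan         = "startup-4"           # placeholder
  service_name = "example-opensearch"

  opensearch_user_config {
    opensearch {
      auth_failure_listeners {
        # Temporarily block IP addresses with too many failed logins.
        ip_rate_limiting {
          type                 = "ip"
          allowed_tries        = 10
          time_window_seconds  = 3600
          block_expiry_seconds = 600
          max_blocked_clients  = 100000
          max_tracked_clients  = 100000
        }
        # Rate-limit failed logins against the internal user database.
        internal_authentication_backend_limiting {
          type                   = "username"
          authentication_backend = "internal"
          allowed_tries          = 10
          time_window_seconds    = 3600
          block_expiry_seconds   = 600
          max_blocked_clients    = 100000
          max_tracked_clients    = 100000
        }
      }
    }
  }
}
```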
+ + + ### Nested Schema for `opensearch_user_config.opensearch_dashboards` diff --git a/go.mod b/go.mod index 2251d0c11..200a73d39 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/storage v1.28.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/aiven/go-api-schemas v1.27.1-0.20230823111901-1e0306909d02 + github.com/aiven/go-api-schemas v1.35.0 github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/go.sum b/go.sum index ae10b7305..5f79866c0 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/aiven/aiven-go-client/v2 v2.1.0 h1:n8k34HpEQ7KgxRcyX/F5WKR6xh8MSAM6TtPHnghDNGg= github.com/aiven/aiven-go-client/v2 v2.1.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= -github.com/aiven/go-api-schemas v1.27.1-0.20230823111901-1e0306909d02 h1:nFAKxncY/5aokoDdnKRvUVsjV6MKQq0Cf/21uAPRjDg= -github.com/aiven/go-api-schemas v1.27.1-0.20230823111901-1e0306909d02/go.mod h1:RmQ8MfxwxAP2ji9eJtP6dICOaTMcQD9b5aQT3Bp7uzI= +github.com/aiven/go-api-schemas v1.35.0 h1:hNimpMWeFjU44AmiDzMWo8Hzimg+uoe4GPpoL3HGOi8= +github.com/aiven/go-api-schemas v1.35.0/go.mod h1:RmQ8MfxwxAP2ji9eJtP6dICOaTMcQD9b5aQT3Bp7uzI= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= diff --git a/internal/schemautil/userconfig/apiconvert/fromapi_test.go b/internal/schemautil/userconfig/apiconvert/fromapi_test.go index b7499386e..e9af74654 100644 --- a/internal/schemautil/userconfig/apiconvert/fromapi_test.go +++ b/internal/schemautil/userconfig/apiconvert/fromapi_test.go @@ -110,6 +110,8 @@ func TestFromAPI(t *testing.T) { "log_flush_interval_ms": 0, "log_index_interval_bytes": 0, "log_index_size_max_bytes": 0, + "log_local_retention_bytes": 0, + "log_local_retention_ms": 0, "log_message_downconversion_enable": false, "log_message_timestamp_difference_max_ms": 0, "log_message_timestamp_type": "", diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index 95cb0d759..337cbd298 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -1062,6 +1062,13 @@ func ServiceTypeElasticsearch() *schema.Schema { // ServiceTypeFlink is a generated function returning the schema of the flink ServiceType. 
func ServiceTypeFlink() *schema.Schema { s := map[string]*schema.Schema{ + "additional_backup_regions": { + Description: "Additional Cloud Regions for Backup Replication.", + Elem: &schema.Schema{Type: schema.TypeString}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, "flink_version": { Description: "Flink major version.", Optional: true, @@ -1143,6 +1150,11 @@ func ServiceTypeFlink() *schema.Schema { Optional: true, Type: schema.TypeList, }, + "static_ips": { + Description: "Use static public IP addresses.", + Optional: true, + Type: schema.TypeBool, + }, } return &schema.Schema{ @@ -1166,7 +1178,7 @@ func ServiceTypeGrafana() *schema.Schema { Type: schema.TypeList, }, "alerting_enabled": { - Description: "Enable or disable Grafana alerting functionality.", + Description: "Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.", Optional: true, Type: schema.TypeBool, }, @@ -2024,6 +2036,11 @@ func ServiceTypeGrafana() *schema.Schema { Optional: true, Type: schema.TypeBool, }, + "unified_alerting_enabled": { + Description: "Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.", + Optional: true, + Type: schema.TypeBool, + }, "user_auto_assign_org": { Description: "Auto-assign new users on signup to main organization. Defaults to false.", Optional: true, @@ -2408,6 +2425,16 @@ func ServiceTypeKafka() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "log_local_retention_bytes": { + Description: "The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.", + Optional: true, + Type: schema.TypeInt, + }, + "log_local_retention_ms": { + Description: "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.", + Optional: true, + Type: schema.TypeInt, + }, "log_message_downconversion_enable": { Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .", Optional: true, @@ -2605,6 +2632,16 @@ func ServiceTypeKafka() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "log_local_retention_bytes": { + Description: "The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.", + Optional: true, + Type: schema.TypeInt, + }, + "log_local_retention_ms": { + Description: "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.", + Optional: true, + Type: schema.TypeInt, + }, "log_message_downconversion_enable": { Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. 
.", Optional: true, @@ -3286,6 +3323,58 @@ func ServiceTypeKafka() *schema.Schema { Optional: true, Type: schema.TypeBool, }, + "tiered_storage": { + Description: "Tiered storage configuration.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "enabled": { + Description: "Whether to enable the tiered storage functionality.", + Optional: true, + Type: schema.TypeBool, + }, + "local_cache": { + Description: "Local cache configuration.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"size": { + Description: "Local cache size in bytes.", + Optional: true, + Type: schema.TypeInt, + }}), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"size": { + Description: "Local cache size in bytes.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Description: "Whether to enable the tiered storage functionality.", + Optional: true, + Type: schema.TypeBool, + }, + "local_cache": { + Description: "Local cache configuration.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"size": { + Description: "Local cache size in bytes.", + Optional: true, + Type: schema.TypeInt, + }}), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{"size": { + Description: "Local cache size in bytes.", + Optional: true, + Type: schema.TypeInt, + }}}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, } return &schema.Schema{ @@ -5435,6 +5524,316 @@ func ServiceTypeOpensearch() *schema.Schema { Optional: true, Type: schema.TypeBool, }, + "auth_failure_listeners": { + Description: "Opensearch Security Plugin Settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "internal_authentication_backend_limiting": { + Description: ".", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. 
Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_rate_limiting": { + Description: "IP address rate limiting settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "internal_authentication_backend_limiting": { + Description: ".", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. 
Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_rate_limiting": { + Description: "IP address rate limiting settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: 
"The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, "cluster_max_shards_per_node": { Description: "Controls the number of shards allowed in the cluster per data node.", Optional: true, @@ -5506,6 +5905,42 @@ func ServiceTypeOpensearch() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "ism_enabled": { + Default: true, + Description: "Specifies whether ISM is enabled or not. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_enabled": { + Default: true, + Description: "Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_max_age": { + Default: "24", + Description: "The maximum age before rolling over the audit history index in hours. The default value is `24`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_max_docs": { + Default: "2500000", + Description: "The maximum number of documents before rolling over the audit history index. The default value is `2500000`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_check_period": { + Default: "8", + Description: "The time between rollover checks for the audit history index in hours. The default value is `8`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_retention_period": { + Default: "30", + Description: "How long audit history indices are kept in days. The default value is `30`.", + Optional: true, + Type: schema.TypeInt, + }, "override_main_response_version": { Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", Optional: true, @@ -5595,6 +6030,316 @@ func ServiceTypeOpensearch() *schema.Schema { Optional: true, Type: schema.TypeBool, }, + "auth_failure_listeners": { + Description: "Opensearch Security Plugin Settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "internal_authentication_backend_limiting": { + Description: ".", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. 
Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_rate_limiting": { + Description: "IP address rate limiting settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: 
"The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "internal_authentication_backend_limiting": { + Description: ".", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "authentication_backend": { + Description: "The internal backend. 
Enter `internal`.", + Optional: true, + Type: schema.TypeString, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + "ip_rate_limiting": { + Description: "IP address rate limiting settings.", + DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }), + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "allowed_tries": { + Description: "The number of login attempts allowed before login is blocked.", + Optional: true, + Type: schema.TypeInt, + }, + "block_expiry_seconds": { + Description: "The duration of time that login remains blocked after a failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "max_blocked_clients": { + Description: "The maximum number of blocked IP addresses.", + Optional: true, + Type: schema.TypeInt, + }, + "max_tracked_clients": { + Description: "The maximum number of tracked IP addresses that have failed login.", + Optional: true, + Type: schema.TypeInt, + }, + "time_window_seconds": { + Description: "The window of time in which the value for `allowed_tries` is enforced.", + Optional: true, + Type: schema.TypeInt, + }, + "type": { + Description: "The type of rate limiting.", + Optional: true, + Type: schema.TypeString, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, + }}, + MaxItems: 1, + Optional: true, + Type: schema.TypeList, + }, "cluster_max_shards_per_node": { Description: "Controls the number of shards allowed in the cluster per data node.", Optional: true, @@ -5666,6 +6411,42 @@ func ServiceTypeOpensearch() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "ism_enabled": { + Default: true, + Description: "Specifies whether ISM is enabled or not. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_enabled": { + Default: true, + Description: "Specifies whether audit history is enabled or not. 
The logs from ISM are automatically indexed to a logs document. The default value is `true`.", + Optional: true, + Type: schema.TypeBool, + }, + "ism_history_max_age": { + Default: "24", + Description: "The maximum age before rolling over the audit history index in hours. The default value is `24`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_max_docs": { + Default: "2500000", + Description: "The maximum number of documents before rolling over the audit history index. The default value is `2500000`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_check_period": { + Default: "8", + Description: "The time between rollover checks for the audit history index in hours. The default value is `8`.", + Optional: true, + Type: schema.TypeInt, + }, + "ism_history_rollover_retention_period": { + Default: "30", + Description: "How long audit history indices are kept in days. The default value is `30`.", + Optional: true, + Type: schema.TypeInt, + }, "override_main_response_version": { Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", Optional: true, diff --git a/internal/schemautil/userconfig/util.go b/internal/schemautil/userconfig/util.go index 27af11509..e592b770d 100644 --- a/internal/schemautil/userconfig/util.go +++ b/internal/schemautil/userconfig/util.go @@ -167,8 +167,8 @@ var constDescriptionReplaceables = map[string]string{ func descriptionForProperty(p map[string]interface{}, t string) (id bool, d string) { if da, ok := p["description"].(string); ok { d = da - } else { - d = p["title"].(string) + } else if title, ok := p["title"].(string); ok { + d = title } if strings.Contains(strings.ToLower(d), "deprecated") { From b41578aed2f924acb72651407a3a9873221d0f15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 08:16:36 +0000 Subject: [PATCH 05/27] build(deps): bump golang.org/x/sync from 0.3.0 to 0.4.0 (#1384) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 200a73d39..041e9f73e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.8.4 golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 - golang.org/x/sync v0.3.0 + golang.org/x/sync v0.4.0 golang.org/x/tools v0.6.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 5f79866c0..283ea751e 100644 --- a/go.sum +++ b/go.sum @@ -727,8 +727,8 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From f707c99626f0c7883096e5d05ebb24a17113ef75 Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Fri, 6 Oct 2023 14:31:56 +0300 Subject: [PATCH 06/27] feat(plugin): use set instead of list (#1382) --- CHANGELOG.md | 3 - docs/data-sources/service_integration.md | 58 +-- docs/resources/service_integration.md | 58 +-- .../service/serviceintegration/models.go | 36 +- .../service/serviceintegration/userconfig.go | 2 +- .../clickhousekafka/clickhouse_kafka.go | 72 ++-- .../clickhouse_postgresql.go | 40 +- .../userconfig/integration/datadog/datadog.go | 144 +++---- .../external_aws_cloudwatch_metrics.go | 56 +-- .../integration/kafkaconnect/kafka_connect.go | 36 +- .../integration/kafkalogs/kafka_logs.go | 40 +- .../kafkamirrormaker/kafka_mirrormaker.go | 36 +- .../userconfig/integration/logs/logs.go | 40 +- .../userconfig/integration/metrics/metrics.go | 48 +-- internal/schemautil/plugin.go | 22 +- .../serviceintegration/service_integration.go | 398 ------------------ .../service_integration_data_source.go | 51 --- ucgenerator/main.go | 42 +- 18 files changed, 365 insertions(+), 817 deletions(-) delete mode 100644 internal/sdkprovider/service/serviceintegration/service_integration.go delete mode 100644 internal/sdkprovider/service/serviceintegration/service_integration_data_source.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e2ba04eb..cc0ee9d69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,9 +10,6 @@ nav_order: 1 - Migrate `aiven_service_integration` to the Plugin Framework - -## [MAJOR.MINOR.PATCH] - YYYY-MM-DD - ## [4.9.1] - 2023-10-03 - Allow to modify `pg_user` replication settings diff --git a/docs/data-sources/service_integration.md b/docs/data-sources/service_integration.md index f3f35d1e6..c5a1bf004 100644 --- a/docs/data-sources/service_integration.md +++ b/docs/data-sources/service_integration.md @@ -30,18 +30,18 @@ data "aiven_service_integration" "myintegration" { ### Read-Only -- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) - `id` (String) The ID of this resource. 
- `integration_id` (String) Service Integration Id at aiven -- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) +- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) @@ -49,7 +49,7 @@ data "aiven_service_integration" "myintegration" { Read-Only: -- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -57,7 +57,7 @@ Read-Only: Read-Only: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `data_format` (String) Message data format. The default value is `JSONEachRow`. - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `group_name` (String) Kafka consumers group. The default value is `clickhouse`. @@ -68,7 +68,7 @@ Read-Only: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. 
-- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -94,7 +94,7 @@ Read-Only: Read-Only: -- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -112,15 +112,15 @@ Read-Only: Read-Only: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) List of custom metrics. -- `exclude_topics` (List of String) List of topics to exclude. -- `include_consumer_groups` (List of String) List of custom metrics. -- `include_topics` (List of String) List of topics to include. -- `kafka_custom_metrics` (List of String) List of custom metrics. +- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (Set of String) List of custom metrics. +- `exclude_topics` (Set of String) List of topics to exclude. +- `include_consumer_groups` (Set of String) List of custom metrics. +- `include_topics` (Set of String) List of topics to include. +- `kafka_custom_metrics` (Set of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. 
-- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -155,8 +155,8 @@ Read-Only: Read-Only: -- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -182,7 +182,7 @@ Read-Only: Read-Only: -- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -202,7 +202,7 @@ Read-Only: Read-Only: - `kafka_topic` (String) Topic name. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -211,7 +211,7 @@ Read-Only: Read-Only: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -234,7 +234,7 @@ Read-Only: - `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. - `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. 
+- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -245,7 +245,7 @@ Read-Only: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -253,7 +253,7 @@ Read-Only: Read-Only: -- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index 9218f0576..b82e11a9d 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -33,17 +33,17 @@ resource "aiven_service_integration" "my_integration_metrics" { ### Optional -- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) - `destination_service_name` (String) Destination service for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) -- `kafka_connect_user_config` 
(Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - `source_service_name` (String) Source service for the integration (if any) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) @@ -58,7 +58,7 @@ resource "aiven_service_integration" "my_integration_metrics" { Optional: -- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -72,7 +72,7 @@ Required: Optional: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`. @@ -80,7 +80,7 @@ Optional: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. 
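Each `(Block Set)` entry above maps to a `SetNestedBlock` carrying `setvalidator.SizeAtMost` bounds in the Go schemas later in this patch. A condensed sketch of that generated pattern, assuming terraform-plugin-framework v1; the `tables`/`name` fields here are a simplified stand-in for the provider's full schema:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
)

// userConfigBlock mirrors the shape the generator emits: a single-element
// set of config objects, each holding a bounded nested set of tables.
func userConfigBlock() schema.SetNestedBlock {
	return schema.SetNestedBlock{
		Description: "Integration user config",
		NestedObject: schema.NestedBlockObject{
			Blocks: map[string]schema.Block{
				"tables": schema.SetNestedBlock{
					Description: "Tables to create",
					NestedObject: schema.NestedBlockObject{
						Attributes: map[string]schema.Attribute{
							"name": schema.StringAttribute{Required: true},
						},
					},
					// At most 100 nested table blocks.
					Validators: []validator.Set{setvalidator.SizeAtMost(100)},
				},
			},
		},
		// The user config block itself may appear at most once.
		Validators: []validator.Set{setvalidator.SizeAtMost(1)},
	}
}
```

`SizeAtMost(1)` on the outer block is what keeps a user config effectively a singleton even though HCL block syntax would allow repeating it.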
-- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -106,7 +106,7 @@ Required: Optional: -- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -124,15 +124,15 @@ Optional: Optional: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) List of custom metrics. -- `exclude_topics` (List of String) List of topics to exclude. -- `include_consumer_groups` (List of String) List of custom metrics. -- `include_topics` (List of String) List of topics to include. -- `kafka_custom_metrics` (List of String) List of custom metrics. +- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (Set of String) List of custom metrics. +- `exclude_topics` (Set of String) List of topics to exclude. +- `include_consumer_groups` (Set of String) List of custom metrics. +- `include_topics` (Set of String) List of topics to include. +- `kafka_custom_metrics` (Set of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. 
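Behind these docs, every `Block Set` is decoded and re-encoded by generic helpers; the patch calls them `schemautil.ExpandSetNested` and `schemautil.FlattenSetNested`, whose bodies sit outside these hunks. A plausible sketch of the expand direction under that assumption (`expandSetNested` here is illustrative, not the provider's actual code):

```go
package schemautilsketch

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// expandSetNested is a stand-in for the internal schemautil.ExpandSetNested
// helper referenced throughout this patch: decode a types.Set into tfo
// structs, then map each one to its DTO via the supplied expand function.
func expandSetNested[T, D any](
	ctx context.Context,
	diags *diag.Diagnostics,
	expand func(context.Context, *diag.Diagnostics, *T) *D,
	set types.Set,
) []*D {
	// Absent or not-yet-known blocks expand to nothing.
	if set.IsUnknown() || set.IsNull() {
		return nil
	}
	var items []T
	diags.Append(set.ElementsAs(ctx, &items, false)...)
	if diags.HasError() {
		return nil
	}
	out := make([]*D, 0, len(items))
	for i := range items {
		out = append(out, expand(ctx, diags, &items[i]))
	}
	return out
}
```

The flatten direction would do the inverse, ending in `types.SetValueFrom`, as the datadog hunks below show.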
-- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -170,8 +170,8 @@ Optional: Optional: -- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -197,7 +197,7 @@ Required: Optional: -- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -220,7 +220,7 @@ Required: Optional: -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -229,7 +229,7 @@ Optional: Optional: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -252,7 +252,7 @@ Optional: - `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. - `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent. @@ -263,7 +263,7 @@ Optional: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -271,7 +271,7 @@ Optional: Optional: -- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` diff --git a/internal/plugin/service/serviceintegration/models.go b/internal/plugin/service/serviceintegration/models.go index c499147c6..f2a752a8e 100644 --- a/internal/plugin/service/serviceintegration/models.go +++ b/internal/plugin/service/serviceintegration/models.go @@ -26,15 +26,15 @@ type resourceModel struct { IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` - ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` - ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` - DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` - ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` - KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` - KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` - KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` - LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` - MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` + ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + 
ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` } type dataSourceModel struct { @@ -46,15 +46,15 @@ type dataSourceModel struct { IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` - ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` - ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` - DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` - ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` - KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` - KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` - KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` - LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` - MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` + ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` } func (p *resourceModel) getID() string { diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go index 901eaf406..d398f18ba 100644 --- a/internal/plugin/service/serviceintegration/userconfig.go +++ b/internal/plugin/service/serviceintegration/userconfig.go @@ -131,6 +131,6 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceM } } -func isSet(o 
types.List) bool { +func isSet(o types.Set) bool { return !(o.IsUnknown() || o.IsNull()) } diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go index 02b903200..b22a9ef20 100644 --- a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go @@ -5,7 +5,7 @@ package clickhousekafka import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -19,10 +19,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.SetNestedBlock{ Description: "Tables to create", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ @@ -88,7 +88,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, Blocks: map[string]resource.Block{ - "columns": resource.ListNestedBlock{ + "columns": resource.SetNestedBlock{ Description: "Table columns", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "name": resource.StringAttribute{ @@ -100,29 +100,29 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, - "topics": resource.ListNestedBlock{ + "topics": resource.SetNestedBlock{ Description: "Kafka topics", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"name": resource.StringAttribute{ Description: "Name of the topic.", Required: true, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.SetNestedBlock{ Description: "Tables to create", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ @@ -172,7 +172,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, Blocks: map[string]datasource.Block{ - "columns": 
datasource.ListNestedBlock{ + "columns": datasource.SetNestedBlock{ Description: "Table columns", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "name": datasource.StringAttribute{ @@ -184,27 +184,27 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Column type.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, - "topics": datasource.ListNestedBlock{ + "topics": datasource.SetNestedBlock{ Description: "Kafka topics", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"name": datasource.StringAttribute{ Computed: true, Description: "Name of the topic.", }}}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - Tables types.List `tfsdk:"tables"` + Tables types.Set `tfsdk:"tables"` } // dtoUserConfig request/response object @@ -214,7 +214,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - tablesVar := schemautil.ExpandListNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) + tablesVar := schemautil.ExpandSetNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) if diags.HasError() { return nil } @@ -223,19 +223,19 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - tablesVar := schemautil.FlattenListNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) + tablesVar := schemautil.FlattenSetNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) if diags.HasError() { return nil } return &tfoUserConfig{Tables: tablesVar} } -var userConfigAttrs = map[string]attr.Type{"tables": types.ListType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} +var userConfigAttrs = map[string]attr.Type{"tables": types.SetType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} // tfoTables Table to create type tfoTables struct { AutoOffsetReset types.String `tfsdk:"auto_offset_reset"` - Columns types.List `tfsdk:"columns"` + Columns types.Set `tfsdk:"columns"` DataFormat types.String `tfsdk:"data_format"` DateTimeInputFormat types.String `tfsdk:"date_time_input_format"` GroupName types.String `tfsdk:"group_name"` @@ -246,7 +246,7 @@ type tfoTables struct { NumConsumers types.Int64 `tfsdk:"num_consumers"` PollMaxBatchSize types.Int64 `tfsdk:"poll_max_batch_size"` SkipBrokenMessages types.Int64 `tfsdk:"skip_broken_messages"` - Topics types.List `tfsdk:"topics"` + Topics types.Set `tfsdk:"topics"` } // dtoTables request/response object @@ -268,11 +268,11 @@ type dtoTables struct { // expandTables expands tf object into dto object func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *dtoTables { - columnsVar := schemautil.ExpandListNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) + columnsVar := 
schemautil.ExpandSetNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) if diags.HasError() { return nil } - topicsVar := schemautil.ExpandListNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) + topicsVar := schemautil.ExpandSetNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) if diags.HasError() { return nil } @@ -295,11 +295,11 @@ func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *d // flattenTables flattens dto object into tf object func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) *tfoTables { - columnsVar := schemautil.FlattenListNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) + columnsVar := schemautil.FlattenSetNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) if diags.HasError() { return nil } - topicsVar := schemautil.FlattenListNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) + topicsVar := schemautil.FlattenSetNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) if diags.HasError() { return nil } @@ -322,7 +322,7 @@ func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) * var tablesAttrs = map[string]attr.Type{ "auto_offset_reset": types.StringType, - "columns": types.ListType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, + "columns": types.SetType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, "data_format": types.StringType, "date_time_input_format": types.StringType, "group_name": types.StringType, @@ -333,7 +333,7 @@ var tablesAttrs = map[string]attr.Type{ "num_consumers": types.Int64Type, "poll_max_batch_size": types.Int64Type, "skip_broken_messages": types.Int64Type, - "topics": types.ListType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, + "topics": types.SetType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, } // tfoColumns Table column @@ -392,17 +392,17 @@ func flattenTopics(ctx context.Context, diags *diag.Diagnostics, o *dtoTopics) * var topicsAttrs = map[string]attr.Type{"name": types.StringType} // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go 
b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go index a0cbd20b8..e67ab4a68 100644 --- a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go @@ -5,7 +5,7 @@ package clickhousepostgresql import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -18,10 +18,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.SetNestedBlock{ Description: "Databases to expose", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "database": resource.StringAttribute{ @@ -37,17 +37,17 @@ func NewResourceSchema() resource.ListNestedBlock { Optional: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(10)}, + Validators: []validator.Set{setvalidator.SizeAtMost(10)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.SetNestedBlock{ Description: "Databases to expose", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "database": datasource.StringAttribute{ @@ -59,15 +59,15 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "PostgreSQL schema to expose. 
The default value is `public`.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(10)}, + Validators: []validator.Set{setvalidator.SizeAtMost(10)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - Databases types.List `tfsdk:"databases"` + Databases types.Set `tfsdk:"databases"` } // dtoUserConfig request/response object @@ -77,7 +77,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - databasesVar := schemautil.ExpandListNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) + databasesVar := schemautil.ExpandSetNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) if diags.HasError() { return nil } @@ -86,14 +86,14 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - databasesVar := schemautil.FlattenListNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) + databasesVar := schemautil.FlattenSetNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) if diags.HasError() { return nil } return &tfoUserConfig{Databases: databasesVar} } -var userConfigAttrs = map[string]attr.Type{"databases": types.ListType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} +var userConfigAttrs = map[string]attr.Type{"databases": types.SetType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} // tfoDatabases Database to expose type tfoDatabases struct { @@ -129,17 +129,17 @@ var databasesAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog.go b/internal/plugin/service/userconfig/integration/datadog/datadog.go index 8a144b214..0cde9d5f6 100644 --- a/internal/plugin/service/userconfig/integration/datadog/datadog.go +++ b/internal/plugin/service/userconfig/integration/datadog/datadog.go @@ -5,7 +5,7 @@ package datadog import ( 
"context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -18,8 +18,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ "datadog_dbm_enabled": resource.BoolAttribute{ @@ -27,40 +27,40 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "Enable Datadog Database Monitoring.", Optional: true, }, - "exclude_consumer_groups": resource.ListAttribute{ + "exclude_consumer_groups": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "exclude_topics": resource.ListAttribute{ + "exclude_topics": resource.SetAttribute{ Computed: true, Description: "List of topics to exclude.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_consumer_groups": resource.ListAttribute{ + "include_consumer_groups": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_topics": resource.ListAttribute{ + "include_topics": resource.SetAttribute{ Computed: true, Description: "List of topics to include.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "kafka_custom_metrics": resource.ListAttribute{ + "kafka_custom_metrics": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, "max_jmx_metrics": resource.Int64Attribute{ Computed: true, @@ -69,7 +69,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, Blocks: map[string]resource.Block{ - "datadog_tags": resource.ListNestedBlock{ + "datadog_tags": resource.SetNestedBlock{ Description: "Custom tags provided by user", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "comment": resource.StringAttribute{ @@ -82,9 +82,9 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(32)}, + Validators: []validator.Set{setvalidator.SizeAtMost(32)}, }, - "opensearch": resource.ListNestedBlock{ + "opensearch": resource.SetNestedBlock{ Description: "Datadog Opensearch Options", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "index_stats_enabled": resource.BoolAttribute{ @@ -104,7 +104,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }}, }, - "redis": 
resource.ListNestedBlock{ + "redis": resource.SetNestedBlock{ Description: "Datadog Redis Options", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"command_stats_enabled": resource.BoolAttribute{ Computed: true, @@ -115,48 +115,48 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ "datadog_dbm_enabled": datasource.BoolAttribute{ Computed: true, Description: "Enable Datadog Database Monitoring.", }, - "exclude_consumer_groups": datasource.ListAttribute{ + "exclude_consumer_groups": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "exclude_topics": datasource.ListAttribute{ + "exclude_topics": datasource.SetAttribute{ Computed: true, Description: "List of topics to exclude.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_consumer_groups": datasource.ListAttribute{ + "include_consumer_groups": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_topics": datasource.ListAttribute{ + "include_topics": datasource.SetAttribute{ Computed: true, Description: "List of topics to include.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "kafka_custom_metrics": datasource.ListAttribute{ + "kafka_custom_metrics": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, "max_jmx_metrics": datasource.Int64Attribute{ Computed: true, @@ -164,7 +164,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, Blocks: map[string]datasource.Block{ - "datadog_tags": datasource.ListNestedBlock{ + "datadog_tags": datasource.SetNestedBlock{ Description: "Custom tags provided by user", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "comment": datasource.StringAttribute{ @@ -176,9 +176,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(32)}, + Validators: []validator.Set{setvalidator.SizeAtMost(32)}, }, - "opensearch": datasource.ListNestedBlock{ + "opensearch": datasource.SetNestedBlock{ Description: "Datadog Opensearch Options", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "index_stats_enabled": datasource.BoolAttribute{ @@ -195,7 +195,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }}, }, - "redis": datasource.ListNestedBlock{ + "redis": datasource.SetNestedBlock{ Description: "Datadog Redis Options", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"command_stats_enabled": datasource.BoolAttribute{ Computed: true, @@ -204,22 +204,22 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig type tfoUserConfig struct { DatadogDbmEnabled types.Bool `tfsdk:"datadog_dbm_enabled"` - DatadogTags types.List `tfsdk:"datadog_tags"` - ExcludeConsumerGroups types.List `tfsdk:"exclude_consumer_groups"` - ExcludeTopics types.List `tfsdk:"exclude_topics"` - IncludeConsumerGroups types.List `tfsdk:"include_consumer_groups"` - IncludeTopics types.List `tfsdk:"include_topics"` - KafkaCustomMetrics types.List `tfsdk:"kafka_custom_metrics"` + DatadogTags types.Set `tfsdk:"datadog_tags"` + ExcludeConsumerGroups types.Set `tfsdk:"exclude_consumer_groups"` + ExcludeTopics types.Set `tfsdk:"exclude_topics"` + IncludeConsumerGroups types.Set `tfsdk:"include_consumer_groups"` + IncludeTopics types.Set `tfsdk:"include_topics"` + KafkaCustomMetrics types.Set `tfsdk:"kafka_custom_metrics"` MaxJmxMetrics types.Int64 `tfsdk:"max_jmx_metrics"` - Opensearch types.List `tfsdk:"opensearch"` - Redis types.List `tfsdk:"redis"` + Opensearch types.Set `tfsdk:"opensearch"` + Redis types.Set `tfsdk:"redis"` } // dtoUserConfig request/response object @@ -238,35 +238,35 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - datadogTagsVar := schemautil.ExpandListNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) + datadogTagsVar := schemautil.ExpandSetNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) if diags.HasError() { return nil } - excludeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeConsumerGroups) + excludeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeConsumerGroups) if diags.HasError() { return nil } - excludeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeTopics) + excludeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeTopics) if diags.HasError() { return nil } - includeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeConsumerGroups) + includeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeConsumerGroups) if diags.HasError() { return nil } - includeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeTopics) + includeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeTopics) if diags.HasError() { return nil } - kafkaCustomMetricsVar := schemautil.ExpandList[string](ctx, diags, o.KafkaCustomMetrics) + kafkaCustomMetricsVar := 
schemautil.ExpandSet[string](ctx, diags, o.KafkaCustomMetrics) if diags.HasError() { return nil } - opensearchVar := schemautil.ExpandListBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) + opensearchVar := schemautil.ExpandSetBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) if diags.HasError() { return nil } - redisVar := schemautil.ExpandListBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) + redisVar := schemautil.ExpandSetBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) if diags.HasError() { return nil } @@ -286,40 +286,40 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - datadogTagsVar := schemautil.FlattenListNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) + datadogTagsVar := schemautil.FlattenSetNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) if diags.HasError() { return nil } - excludeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) + excludeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) diags.Append(d...) if diags.HasError() { return nil } - excludeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeTopics) + excludeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeTopics) diags.Append(d...) if diags.HasError() { return nil } - includeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) + includeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) diags.Append(d...) if diags.HasError() { return nil } - includeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeTopics) + includeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeTopics) diags.Append(d...) if diags.HasError() { return nil } - kafkaCustomMetricsVar, d := types.ListValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) + kafkaCustomMetricsVar, d := types.SetValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) diags.Append(d...) 
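```go
// Note on the substitution applied throughout this hunk: each
// types.ListValueFrom call becomes types.SetValueFrom with an
// identical signature, e.g.
//
//	v, d := types.SetValueFrom(ctx, types.StringType, o.KafkaCustomMetrics)
//	diags.Append(d...)
//
// The returned value carries set semantics, so element order reported by the
// API can no longer cause a spurious plan diff for these fields.
```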
if diags.HasError() { return nil } - opensearchVar := schemautil.FlattenListBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) + opensearchVar := schemautil.FlattenSetBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) if diags.HasError() { return nil } - redisVar := schemautil.FlattenListBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) + redisVar := schemautil.FlattenSetBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) if diags.HasError() { return nil } @@ -339,15 +339,15 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "datadog_dbm_enabled": types.BoolType, - "datadog_tags": types.ListType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, - "exclude_consumer_groups": types.ListType{ElemType: types.StringType}, - "exclude_topics": types.ListType{ElemType: types.StringType}, - "include_consumer_groups": types.ListType{ElemType: types.StringType}, - "include_topics": types.ListType{ElemType: types.StringType}, - "kafka_custom_metrics": types.ListType{ElemType: types.StringType}, + "datadog_tags": types.SetType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, + "exclude_consumer_groups": types.SetType{ElemType: types.StringType}, + "exclude_topics": types.SetType{ElemType: types.StringType}, + "include_consumer_groups": types.SetType{ElemType: types.StringType}, + "include_topics": types.SetType{ElemType: types.StringType}, + "kafka_custom_metrics": types.SetType{ElemType: types.StringType}, "max_jmx_metrics": types.Int64Type, - "opensearch": types.ListType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, - "redis": types.ListType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, + "opensearch": types.SetType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, + "redis": types.SetType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, } // tfoDatadogTags Datadog tag defined by user @@ -444,17 +444,17 @@ func flattenRedis(ctx context.Context, diags *diag.Diagnostics, o *dtoRedis) *tf var redisAttrs = map[string]attr.Type{"command_stats_enabled": types.BoolType} // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git 
a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go index bc0a75e75..c49842976 100644 --- a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go @@ -5,7 +5,7 @@ package externalawscloudwatchmetrics import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,11 +17,11 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "External AWS CloudWatch Metrics integration user config", NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{ - "dropped_metrics": resource.ListNestedBlock{ + "dropped_metrics": resource.SetNestedBlock{ Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "field": resource.StringAttribute{ @@ -33,9 +33,9 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "extra_metrics": resource.ListNestedBlock{ + "extra_metrics": resource.SetNestedBlock{ Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "field": resource.StringAttribute{ @@ -47,19 +47,19 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "External AWS CloudWatch Metrics integration user config", NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{ - "dropped_metrics": datasource.ListNestedBlock{ + "dropped_metrics": datasource.SetNestedBlock{ Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "field": datasource.StringAttribute{ @@ -71,9 +71,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Identifier of the metric.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "extra_metrics": datasource.ListNestedBlock{ + "extra_metrics": 
datasource.SetNestedBlock{ Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "field": datasource.StringAttribute{ @@ -85,17 +85,17 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Identifier of the metric.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig External AWS CloudWatch Metrics integration user config type tfoUserConfig struct { - DroppedMetrics types.List `tfsdk:"dropped_metrics"` - ExtraMetrics types.List `tfsdk:"extra_metrics"` + DroppedMetrics types.Set `tfsdk:"dropped_metrics"` + ExtraMetrics types.Set `tfsdk:"extra_metrics"` } // dtoUserConfig request/response object @@ -106,11 +106,11 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - droppedMetricsVar := schemautil.ExpandListNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) + droppedMetricsVar := schemautil.ExpandSetNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) if diags.HasError() { return nil } - extraMetricsVar := schemautil.ExpandListNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) + extraMetricsVar := schemautil.ExpandSetNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) if diags.HasError() { return nil } @@ -122,11 +122,11 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - droppedMetricsVar := schemautil.FlattenListNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) + droppedMetricsVar := schemautil.FlattenSetNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) if diags.HasError() { return nil } - extraMetricsVar := schemautil.FlattenListNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) + extraMetricsVar := schemautil.FlattenSetNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) if diags.HasError() { return nil } @@ -137,8 +137,8 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC } var userConfigAttrs = map[string]attr.Type{ - "dropped_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, - "extra_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, + "dropped_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, + "extra_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, } // tfoDroppedMetrics Metric name and subfield @@ -208,17 +208,17 @@ var extraMetricsAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - 
return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go index 7593af6cf..9da78cb31 100644 --- a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go @@ -5,7 +5,7 @@ package kafkaconnect import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,10 +17,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.SetNestedBlock{ Description: "Kafka Connect service configuration values", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "config_storage_topic": resource.StringAttribute{ @@ -45,15 +45,15 @@ func NewResourceSchema() resource.ListNestedBlock { }, }}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.SetNestedBlock{ Description: "Kafka Connect service configuration values", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "config_storage_topic": datasource.StringAttribute{ @@ -74,13 
+74,13 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - KafkaConnect types.List `tfsdk:"kafka_connect"` + KafkaConnect types.Set `tfsdk:"kafka_connect"` } // dtoUserConfig request/response object @@ -90,7 +90,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - kafkaConnectVar := schemautil.ExpandListBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) + kafkaConnectVar := schemautil.ExpandSetBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) if diags.HasError() { return nil } @@ -99,14 +99,14 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - kafkaConnectVar := schemautil.FlattenListBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) + kafkaConnectVar := schemautil.FlattenSetBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) if diags.HasError() { return nil } return &tfoUserConfig{KafkaConnect: kafkaConnectVar} } -var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} +var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} // tfoKafkaConnect Kafka Connect service configuration values type tfoKafkaConnect struct { @@ -152,17 +152,17 @@ var kafkaConnectAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go index b1c8cb565..9b42c841b 100644 --- a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go +++ 
b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go @@ -5,7 +5,7 @@ package kafkalogs import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,48 +17,48 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "kafka_topic": resource.StringAttribute{ Description: "Topic name.", Required: true, }, - "selected_log_fields": resource.ListAttribute{ + "selected_log_fields": resource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "kafka_topic": datasource.StringAttribute{ Computed: true, Description: "Topic name.", }, - "selected_log_fields": datasource.ListAttribute{ + "selected_log_fields": datasource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig type tfoUserConfig struct { KafkaTopic types.String `tfsdk:"kafka_topic"` - SelectedLogFields types.List `tfsdk:"selected_log_fields"` + SelectedLogFields types.Set `tfsdk:"selected_log_fields"` } // dtoUserConfig request/response object @@ -69,7 +69,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields) if diags.HasError() { return nil } @@ -81,7 +81,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields) diags.Append(d...) if diags.HasError() { return nil @@ -94,21 +94,21 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "kafka_topic": types.StringType, - "selected_log_fields": types.ListType{ElemType: types.StringType}, + "selected_log_fields": types.SetType{ElemType: types.StringType}, } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go index 548514588..de8db7181 100644 --- a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go +++ b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go @@ -5,7 +5,7 @@ package kafkamirrormaker import ( "context" - listvalidator 
"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,8 +17,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{"cluster_alias": resource.StringAttribute{ @@ -26,7 +26,7 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", Optional: true, }}, - Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.ListNestedBlock{ + Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.SetNestedBlock{ Description: "Kafka MirrorMaker configuration values", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "consumer_fetch_min_bytes": resource.Int64Attribute{ @@ -62,20 +62,20 @@ func NewResourceSchema() resource.ListNestedBlock { }}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{"cluster_alias": datasource.StringAttribute{ Computed: true, Description: "The alias under which the Kafka cluster is known to MirrorMaker. 
Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", }}, - Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.ListNestedBlock{ + Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.SetNestedBlock{ Description: "Kafka MirrorMaker configuration values", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "consumer_fetch_min_bytes": datasource.Int64Attribute{ @@ -105,14 +105,14 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { ClusterAlias types.String `tfsdk:"cluster_alias"` - KafkaMirrormaker types.List `tfsdk:"kafka_mirrormaker"` + KafkaMirrormaker types.Set `tfsdk:"kafka_mirrormaker"` } // dtoUserConfig request/response object @@ -123,7 +123,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - kafkaMirrormakerVar := schemautil.ExpandListBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) + kafkaMirrormakerVar := schemautil.ExpandSetBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) if diags.HasError() { return nil } @@ -135,7 +135,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - kafkaMirrormakerVar := schemautil.FlattenListBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) + kafkaMirrormakerVar := schemautil.FlattenSetBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) if diags.HasError() { return nil } @@ -147,7 +147,7 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "cluster_alias": types.StringType, - "kafka_mirrormaker": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, + "kafka_mirrormaker": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, } // tfoKafkaMirrormaker Kafka MirrorMaker configuration values @@ -204,17 +204,17 @@ var kafkaMirrormakerAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return 
types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/logs/logs.go b/internal/plugin/service/userconfig/integration/logs/logs.go index 7a9be7d51..1916dcdcc 100644 --- a/internal/plugin/service/userconfig/integration/logs/logs.go +++ b/internal/plugin/service/userconfig/integration/logs/logs.go @@ -5,7 +5,7 @@ package logs import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -19,8 +19,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "elasticsearch_index_days_max": resource.Int64Attribute{ Computed: true, @@ -34,21 +34,21 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "Elasticsearch index prefix. The default value is `logs`.", Optional: true, }, - "selected_log_fields": resource.ListAttribute{ + "selected_log_fields": resource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "elasticsearch_index_days_max": datasource.Int64Attribute{ Computed: true, @@ -58,14 +58,14 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Computed: true, Description: "Elasticsearch index prefix. The default value is `logs`.", }, - "selected_log_fields": datasource.ListAttribute{ + "selected_log_fields": datasource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } @@ -73,7 +73,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { type tfoUserConfig struct { ElasticsearchIndexDaysMax types.Int64 `tfsdk:"elasticsearch_index_days_max"` ElasticsearchIndexPrefix types.String `tfsdk:"elasticsearch_index_prefix"` - SelectedLogFields types.List `tfsdk:"selected_log_fields"` + SelectedLogFields types.Set `tfsdk:"selected_log_fields"` } // dtoUserConfig request/response object @@ -85,7 +85,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields) if diags.HasError() { return nil } @@ -98,7 +98,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields) diags.Append(d...) if diags.HasError() { return nil @@ -113,21 +113,21 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "elasticsearch_index_days_max": types.Int64Type, "elasticsearch_index_prefix": types.StringType, - "selected_log_fields": types.ListType{ElemType: types.StringType}, + "selected_log_fields": types.SetType{ElemType: types.StringType}, } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics.go b/internal/plugin/service/userconfig/integration/metrics/metrics.go index 7670348fc..3fb712119 100644 --- a/internal/plugin/service/userconfig/integration/metrics/metrics.go +++ 
b/internal/plugin/service/userconfig/integration/metrics/metrics.go @@ -5,7 +5,7 @@ package metrics import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,8 +17,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ @@ -43,9 +43,9 @@ func NewResourceSchema() resource.ListNestedBlock { Optional: true, }, }, - Blocks: map[string]resource.Block{"source_mysql": resource.ListNestedBlock{ + Blocks: map[string]resource.Block{"source_mysql": resource.SetNestedBlock{ Description: "Configuration options for metrics where source service is MySQL", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.SetNestedBlock{ Description: "Configuration options for Telegraf MySQL input plugin", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "gather_event_waits": resource.BoolAttribute{ @@ -122,13 +122,13 @@ func NewResourceSchema() resource.ListNestedBlock { }}}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ @@ -149,9 +149,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. 
Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", }, }, - Blocks: map[string]datasource.Block{"source_mysql": datasource.ListNestedBlock{ + Blocks: map[string]datasource.Block{"source_mysql": datasource.SetNestedBlock{ Description: "Configuration options for metrics where source service is MySQL", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.SetNestedBlock{ Description: "Configuration options for Telegraf MySQL input plugin", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "gather_event_waits": datasource.BoolAttribute{ @@ -214,7 +214,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }}}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } @@ -223,7 +223,7 @@ type tfoUserConfig struct { Database types.String `tfsdk:"database"` RetentionDays types.Int64 `tfsdk:"retention_days"` RoUsername types.String `tfsdk:"ro_username"` - SourceMysql types.List `tfsdk:"source_mysql"` + SourceMysql types.Set `tfsdk:"source_mysql"` Username types.String `tfsdk:"username"` } @@ -238,7 +238,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - sourceMysqlVar := schemautil.ExpandListBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) + sourceMysqlVar := schemautil.ExpandSetBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) if diags.HasError() { return nil } @@ -253,7 +253,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - sourceMysqlVar := schemautil.FlattenListBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) + sourceMysqlVar := schemautil.FlattenSetBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) if diags.HasError() { return nil } @@ -270,13 +270,13 @@ var userConfigAttrs = map[string]attr.Type{ "database": types.StringType, "retention_days": types.Int64Type, "ro_username": types.StringType, - "source_mysql": types.ListType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, + "source_mysql": types.SetType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, "username": types.StringType, } // tfoSourceMysql Configuration options for metrics where source service is MySQL type tfoSourceMysql struct { - Telegraf types.List `tfsdk:"telegraf"` + Telegraf types.Set `tfsdk:"telegraf"` } // dtoSourceMysql request/response object @@ -286,7 +286,7 @@ type dtoSourceMysql struct { // expandSourceMysql expands tf object into dto object func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourceMysql) *dtoSourceMysql { - telegrafVar := schemautil.ExpandListBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) + telegrafVar := schemautil.ExpandSetBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) if diags.HasError() { return nil } @@ -295,14 +295,14 @@ 
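Every hunk in this file follows the same convention: a SetNestedBlock constrained with SizeAtMost(1) models an optional single block, so a configured block round-trips as a one-element set and an absent block as a typed empty set. A minimal standalone sketch of that encoding, using only the public terraform-plugin-framework API; the attribute name `gather_event_waits` is borrowed from this file, everything else is illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	objType := types.ObjectType{AttrTypes: map[string]attr.Type{
		"gather_event_waits": types.BoolType,
	}}

	// A configured block becomes a one-element set of objects...
	obj, diags := types.ObjectValue(objType.AttrTypes, map[string]attr.Value{
		"gather_event_waits": types.BoolValue(true),
	})
	if diags.HasError() {
		panic(diags)
	}
	one, d := types.SetValue(objType, []attr.Value{obj})
	if d.HasError() {
		panic(d)
	}

	// ...and an absent block an empty, typed set, which is what
	// FlattenSetBlockNested returns for a nil DTO.
	empty := types.SetValueMust(objType, []attr.Value{})

	fmt.Println(len(one.Elements()), len(empty.Elements())) // prints: 1 0
}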
func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourc // flattenSourceMysql flattens dto object into tf object func flattenSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *dtoSourceMysql) *tfoSourceMysql { - telegrafVar := schemautil.FlattenListBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf) + telegrafVar := schemautil.FlattenSetBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf) if diags.HasError() { return nil } return &tfoSourceMysql{Telegraf: telegrafVar} } -var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.ListType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}} +var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.SetType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}} // tfoTelegraf Configuration options for Telegraf MySQL input plugin type tfoTelegraf struct { @@ -398,17 +398,17 @@ var telegrafAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/schemautil/plugin.go b/internal/schemautil/plugin.go index b06dd8635..6f94a84e5 100644 --- a/internal/schemautil/plugin.go +++ b/internal/schemautil/plugin.go @@ -11,7 +11,7 @@ import ( "github.com/liip/sheriff" ) -func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types.List) (items []T) { +func ExpandSet[T any](ctx context.Context, diags *diag.Diagnostics, list types.Set) (items []T) { if list.IsUnknown() || list.IsNull() { return nil } @@ -21,8 +21,8 @@ func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types. 
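The body of ExpandSet past the null/unknown guard lies outside the hunk above; presumably it delegates to the framework's ElementsAs, roughly along these lines (a hypothetical equivalent, not the file's actual code; same imports as the surrounding file):

// Hypothetical sketch of the elided conversion step, assuming the standard
// types.Set.ElementsAs API.
func expandSetSketch[T any](ctx context.Context, diags *diag.Diagnostics, set types.Set) []T {
	if set.IsUnknown() || set.IsNull() {
		return nil
	}
	var items []T
	// ElementsAs decodes the set elements into a plain Go slice, appending
	// any conversion problems to diags.
	diags.Append(set.ElementsAs(ctx, &items, false)...)
	if diags.HasError() {
		return nil
	}
	return items
}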
type Expander[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K -func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) []*K { - expanded := ExpandList[T](ctx, diags, list) +func ExpandSetNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.Set) []*K { + expanded := ExpandSet[T](ctx, diags, list) if expanded == nil || diags.HasError() { return nil } @@ -37,8 +37,8 @@ func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, ex return items } -func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) *K { - items := ExpandListNested(ctx, diags, expand, list) +func ExpandSetBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.Set) *K { + items := ExpandSetNested(ctx, diags, expand, list) if len(items) == 0 { return nil } @@ -47,9 +47,9 @@ func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostic type Flattener[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K -func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.List { +func FlattenSetNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.Set { oType := types.ObjectType{AttrTypes: attrs} - empty := types.ListValueMust(oType, []attr.Value{}) + empty := types.SetValueMust(oType, []attr.Value{}) items := make([]*K, 0, len(list)) for _, v := range list { items = append(items, flatten(ctx, diags, v)) @@ -58,7 +58,7 @@ func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, f } } - result, d := types.ListValueFrom(ctx, oType, items) + result, d := types.SetValueFrom(ctx, oType, items) diags.Append(d...) 
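	// If the Set conversion above reported errors, fall back to the
	// pre-built typed empty set below rather than returning a null value.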
if diags.HasError() { return empty @@ -66,11 +66,11 @@ func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, f return result } -func FlattenListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.List { +func FlattenSetBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.Set { if o == nil { - return types.ListValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) + return types.SetValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) } - return FlattenListNested(ctx, diags, flatten, attrs, []*T{o}) + return FlattenSetNested(ctx, diags, flatten, attrs, []*T{o}) } // marshalUserConfig converts user config into json diff --git a/internal/sdkprovider/service/serviceintegration/service_integration.go b/internal/sdkprovider/service/serviceintegration/service_integration.go deleted file mode 100644 index 72a4e7042..000000000 --- a/internal/sdkprovider/service/serviceintegration/service_integration.go +++ /dev/null @@ -1,398 +0,0 @@ -package serviceintegration - -import ( - "context" - "fmt" - "log" - "regexp" - "time" - - "github.com/aiven/aiven-go-client/v2" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" -) - -const serviceIntegrationEndpointRegExp = "^[a-zA-Z0-9_-]*\\/{1}[a-zA-Z0-9_-]*$" - -var integrationTypes = []string{ - "alertmanager", - "cassandra_cross_service_cluster", - "clickhouse_kafka", - "clickhouse_postgresql", - "dashboard", - "datadog", - "datasource", - "external_aws_cloudwatch_logs", - "external_aws_cloudwatch_metrics", - "external_elasticsearch_logs", - "external_google_cloud_logging", - "external_opensearch_logs", - "flink", - "internal_connectivity", - "jolokia", - "kafka_connect", - "kafka_logs", - "kafka_mirrormaker", - "logs", - "m3aggregator", - "m3coordinator", - "metrics", - "opensearch_cross_cluster_replication", - "opensearch_cross_cluster_search", - "prometheus", - "read_replica", - "rsyslog", - "schema_registry_proxy", -} - -var aivenServiceIntegrationSchema = map[string]*schema.Schema{ - "integration_id": { - Description: "Service Integration Id at aiven", - Computed: true, - Type: schema.TypeString, - }, - "destination_endpoint_id": { - Description: "Destination endpoint for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp), - "endpoint id should have the following format: project_name/endpoint_id"), - }, - "destination_service_name": { - Description: "Destination service for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "integration_type": { - Description: "Type of the service integration. 
Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"), - ForceNew: true, - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(integrationTypes, false), - }, - "project": { - Description: "Project the integration belongs to", - ForceNew: true, - Required: true, - Type: schema.TypeString, - }, - "source_endpoint_id": { - Description: "Source endpoint for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp), - "endpoint id should have the following format: project_name/endpoint_id"), - }, - "source_service_name": { - Description: "Source service for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "logs_user_config": dist.IntegrationTypeLogs(), - "kafka_mirrormaker_user_config": dist.IntegrationTypeKafkaMirrormaker(), - "kafka_connect_user_config": dist.IntegrationTypeKafkaConnect(), - "kafka_logs_user_config": dist.IntegrationTypeKafkaLogs(), - "metrics_user_config": dist.IntegrationTypeMetrics(), - "datadog_user_config": dist.IntegrationTypeDatadog(), - "clickhouse_kafka_user_config": dist.IntegrationTypeClickhouseKafka(), - "clickhouse_postgresql_user_config": dist.IntegrationTypeClickhousePostgresql(), - "external_aws_cloudwatch_metrics_user_config": dist.IntegrationTypeExternalAwsCloudwatchMetrics(), -} - -func ResourceServiceIntegration() *schema.Resource { - return &schema.Resource{ - Description: "The Service Integration resource allows the creation and management of Aiven Service Integrations.", - CreateContext: resourceServiceIntegrationCreate, - ReadContext: resourceServiceIntegrationRead, - UpdateContext: resourceServiceIntegrationUpdate, - DeleteContext: resourceServiceIntegrationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: schemautil.DefaultResourceTimeouts(), - - Schema: aivenServiceIntegrationSchema, - SchemaVersion: 1, - StateUpgraders: stateupgrader.ServiceIntegration(), - } -} - -func plainEndpointID(fullEndpointID *string) *string { - if fullEndpointID == nil { - return nil - } - _, endpointID, err := schemautil.SplitResourceID2(*fullEndpointID) - if err != nil { - return nil - } - return &endpointID -} - -func resourceServiceIntegrationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - - // read_replicas can be only be created alongside the service. also the only way to promote the replica - // is to delete the service integration that was created so we should make it least painful to do so. 
- // for now we support seamlessly importing preexisting 'read_replica' service integrations in the resource create - all other integrations should be imported using `terraform import` - if integrationType == "read_replica" { - if preexisting, err := resourceServiceIntegrationCheckForPreexistingResource(ctx, d, m); err != nil { - return diag.Errorf("unable to search for possible preexisting 'read_replica' service integration: %s", err) - } else if preexisting != nil { - d.SetId(schemautil.BuildResourceID(projectName, preexisting.ServiceIntegrationID)) - return resourceServiceIntegrationRead(ctx, d, m) - } - } - - uc, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d) - if err != nil { - return diag.FromErr(err) - } - - integration, err := client.ServiceIntegrations.Create( - ctx, - projectName, - aiven.CreateServiceIntegrationRequest{ - DestinationEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "destination_endpoint_id")), - DestinationService: schemautil.OptionalStringPointer(d, "destination_service_name"), - IntegrationType: integrationType, - SourceEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "source_endpoint_id")), - SourceService: schemautil.OptionalStringPointer(d, "source_service_name"), - UserConfig: uc, - }, - ) - if err != nil { - return diag.Errorf("error creating service integration: %s", err) - } - d.SetId(schemautil.BuildResourceID(projectName, integration.ServiceIntegrationID)) - - if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil { - return diag.Errorf("unable to wait for service integration to become active: %s", err) - } - return resourceServiceIntegrationRead(ctx, d, m) -} - -func resourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - integration, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID) - if err != nil { - err = schemautil.ResourceReadHandleNotFound(err, d) - if err != nil { - return diag.Errorf("cannot get service integration: %s; id: %s", err, integrationID) - } - return nil - } - - if err = resourceServiceIntegrationCopyAPIResponseToTerraform(d, integration, projectName); err != nil { - return diag.Errorf("cannot copy api response into terraform schema: %s", err) - } - - return nil -} - -func resourceServiceIntegrationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - userConfig, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d) - if err != nil { - return diag.FromErr(err) - } - - if userConfig == nil { - // Required by API - userConfig = make(map[string]interface{}) - } - - _, err = client.ServiceIntegrations.Update( - ctx, - projectName, - integrationID, - aiven.UpdateServiceIntegrationRequest{ - UserConfig: userConfig, - }, - ) - if err != nil { - return diag.Errorf("unable to update service integration: %s", err) - } - if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil { - return diag.Errorf("unable to wait for service integration to become active: %s", err) - } - - return resourceServiceIntegrationRead(ctx, d, m) -} - -func resourceServiceIntegrationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) 
diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - err = client.ServiceIntegrations.Delete(ctx, projectName, integrationID) - if err != nil && !aiven.IsNotFound(err) { - return diag.Errorf("cannot delete service integration: %s", err) - } - - return nil -} - -func resourceServiceIntegrationCheckForPreexistingResource(ctx context.Context, d *schema.ResourceData, m interface{}) (*aiven.ServiceIntegration, error) { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - sourceServiceName := d.Get("source_service_name").(string) - destinationServiceName := d.Get("destination_service_name").(string) - - integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName) - if err != nil && !aiven.IsNotFound(err) { - return nil, fmt.Errorf("unable to get list of service integrations: %s", err) - } - - for i := range integrations { - integration := integrations[i] - if integration.SourceService == nil || integration.DestinationService == nil || integration.ServiceIntegrationID == "" { - continue - } - - if integration.IntegrationType == integrationType && - *integration.SourceService == sourceServiceName && - *integration.DestinationService == destinationServiceName { - return integration, nil - } - } - return nil, nil -} - -// nolint:staticcheck // TODO: Migrate to helper/retry package to avoid deprecated resource.StateRefreshFunc. -func resourceServiceIntegrationWaitUntilActive(ctx context.Context, d *schema.ResourceData, m interface{}) error { - const ( - active = "ACTIVE" - notActive = "NOTACTIVE" - ) - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return err - } - - stateChangeConf := &resource.StateChangeConf{ - Pending: []string{notActive}, - Target: []string{active}, - Refresh: func() (interface{}, string, error) { - log.Println("[DEBUG] Service Integration: waiting until active") - - ii, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID) - if err != nil { - // Sometimes the Aiven API returns a 404 error even when the service integration was created successfully - if aiven.IsNotFound(err) { - log.Println("[DEBUG] Service Integration: not yet found") - return nil, notActive, nil - } - return nil, "", err - } - if !ii.Active { - log.Println("[DEBUG] Service Integration: not yet active") - return nil, notActive, nil - } - - if ii.IntegrationType == "kafka_connect" && ii.DestinationService != nil { - if _, err := client.KafkaConnectors.List(ctx, projectName, *ii.DestinationService); err != nil { - log.Println("[DEBUG] Service Integration: error listing kafka connectors: ", err) - return nil, notActive, nil - } - } - return ii, active, nil - }, - Delay: 2 * time.Second, - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 2 * time.Second, - ContinuousTargetOccurence: 10, - } - if _, err := stateChangeConf.WaitForStateContext(ctx); err != nil { - return err - } - return nil -} - -func resourceServiceIntegrationUserConfigFromSchemaToAPI(d *schema.ResourceData) (map[string]interface{}, error) { - integrationType := d.Get("integration_type").(string) - return apiconvert.ToAPI(userconfig.IntegrationTypes, integrationType, d) -} - -func resourceServiceIntegrationCopyAPIResponseToTerraform( - d *schema.ResourceData, - integration *aiven.ServiceIntegration, - project string, -) error { - if 
err := d.Set("project", project); err != nil { - return err - } - - if integration.DestinationEndpointID != nil { - if err := d.Set("destination_endpoint_id", schemautil.BuildResourceID(project, *integration.DestinationEndpointID)); err != nil { - return err - } - } else if integration.DestinationService != nil { - if err := d.Set("destination_service_name", *integration.DestinationService); err != nil { - return err - } - } - if integration.SourceEndpointID != nil { - if err := d.Set("source_endpoint_id", schemautil.BuildResourceID(project, *integration.SourceEndpointID)); err != nil { - return err - } - } else if integration.SourceService != nil { - if err := d.Set("source_service_name", *integration.SourceService); err != nil { - return err - } - } - if err := d.Set("integration_id", integration.ServiceIntegrationID); err != nil { - return err - } - integrationType := integration.IntegrationType - if err := d.Set("integration_type", integrationType); err != nil { - return err - } - - userConfig, err := apiconvert.FromAPI(userconfig.IntegrationTypes, integrationType, integration.UserConfig) - if err != nil { - return err - } - - if len(userConfig) > 0 { - if err := d.Set(integrationType+"_user_config", userConfig); err != nil { - return err - } - } - - return nil -} diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go deleted file mode 100644 index 014bcc8da..000000000 --- a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go +++ /dev/null @@ -1,51 +0,0 @@ -package serviceintegration - -import ( - "context" - - "github.com/aiven/aiven-go-client/v2" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) - -func DatasourceServiceIntegration() *schema.Resource { - return &schema.Resource{ - ReadContext: datasourceServiceIntegrationRead, - Description: "The Service Integration data source provides information about the existing Aiven Service Integration.", - Schema: schemautil.ResourceSchemaAsDatasourceSchema(aivenServiceIntegrationSchema, - "project", "integration_type", "source_service_name", "destination_service_name"), - } -} - -func datasourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - sourceServiceName := d.Get("source_service_name").(string) - destinationServiceName := d.Get("destination_service_name").(string) - - integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName) - if err != nil { - return diag.Errorf("unable to list integrations for %s/%s: %s", projectName, sourceServiceName, err) - } - - for _, i := range integrations { - if i.SourceService == nil || i.DestinationService == nil { - continue - } - - if i.IntegrationType == integrationType && - *i.SourceService == sourceServiceName && - *i.DestinationService == destinationServiceName { - - d.SetId(schemautil.BuildResourceID(projectName, i.ServiceIntegrationID)) - return resourceServiceIntegrationRead(ctx, d, m) - } - } - - return diag.Errorf("common integration %s/%s/%s/%s not found", - projectName, integrationType, sourceServiceName, destinationServiceName) -} diff --git a/ucgenerator/main.go b/ucgenerator/main.go index 
4cb3aa912..ef084010b 100644 --- a/ucgenerator/main.go +++ b/ucgenerator/main.go @@ -26,7 +26,7 @@ const ( importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema" importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - importListvalidator = "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + importSetValidator = "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator" codeGenerated = "Code generated by user config generator. DO NOT EDIT." ) @@ -143,19 +143,19 @@ func genAllForObject(f *jen.File, o *object) { // Exports handy public functions for root object only f.Op(` // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } `) } @@ -169,7 +169,7 @@ func genExpander(f *jen.File, o *object) { switch p.Type { case objectTypeObject: value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("expand"+p.camelName), @@ -180,7 +180,7 @@ func genExpander(f *jen.File, o *object) { value = jen.Op(p.varName) if p.ArrayItems.Type == objectTypeObject { // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("expand"+p.camelName), @@ -191,7 +191,7 @@ func genExpander(f *jen.File, o *object) { // It is a list of scalars // We don't want pointer scalars here t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "") - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandList").Types(jen.Id(t)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSet").Types(jen.Id(t)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("o").Dot(p.camelName), @@ -234,7 +234,7 @@ func genFlattener(f 
*jen.File, o *object) { switch p.Type { case objectTypeObject: value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("flatten"+p.camelName), @@ -246,7 +246,7 @@ func genFlattener(f *jen.File, o *object) { value = jen.Op(p.varName) if p.ArrayItems.Type == objectTypeObject { // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("flatten"+p.camelName), @@ -256,7 +256,7 @@ func genFlattener(f *jen.File, o *object) { body = append(body, v, ifErr()) } else { //It is a list of scalars - v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "ListValueFrom").Call( + v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "SetValueFrom").Call( jen.Id("ctx"), jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"), jen.Id("o").Dot(p.camelName), @@ -309,7 +309,7 @@ func genAttrsMap(f *jen.File, o *object) { } else { v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") } - values[key] = jen.Qual(importTypes, "ListType").Values(jen.Dict{jen.Id("ElemType"): v}) + values[key] = jen.Qual(importTypes, "SetType").Values(jen.Dict{jen.Id("ElemType"): v}) default: values[key] = jen.Qual(importTypes, getTFType(p)+"Type") } @@ -352,7 +352,7 @@ func genSchema(f *jen.File, o *object, name, pkg string) { funcName := fmt.Sprintf("New%sSchema", name) f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name))) - f.Func().Id(funcName).Params().Qual(pkg, "ListNestedBlock").Block( + f.Func().Id(funcName).Params().Qual(pkg, "SetNestedBlock").Block( jen.Return(getSchemaAttributes(o, pkg)), ) } @@ -400,7 +400,7 @@ func getSchemaAttributes(o *object, pkg string) jen.Code { values := getSchemaAttributeValues(o, isResource) values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested) - return jen.Qual(pkg, "ListNestedBlock").Values(values) + return jen.Qual(pkg, "SetNestedBlock").Values(values) } func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { @@ -441,7 +441,7 @@ func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { } if len(validators) > 0 { - a[jen.Id("Validators")] = valValidatorList(validators...) + a[jen.Id("Validators")] = valValidatorSet(validators...) } return a @@ -452,11 +452,11 @@ func getTFType(o *object) string { switch o.Type { case objectTypeObject: if o.isNestedBlock() { - return "List" + return "Set" } return "Map" case objectTypeArray: - return "List" + return "Set" case objectTypeString: return "String" case objectTypeBoolean: @@ -573,7 +573,7 @@ func addDot(s string) string { } func getValidator(name string, v any) *jen.Statement { - return jen.Qual(importListvalidator, name).Call(jen.Lit(v)) + return jen.Qual(importSetValidator, name).Call(jen.Lit(v)) } func valSizeAtLeast(n int) *jen.Statement { @@ -584,8 +584,8 @@ func valSizeAtMost(n int) *jen.Statement { return getValidator("SizeAtMost", n) } -func valValidatorList(c ...jen.Code) *jen.Statement { - return jen.Index().Qual(importValidator, "List").Values(c...) 
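The replacement just below emits the Set flavor of this helper. For a runnable taste of the same dave/jennifer technique (a standalone sketch, not the generator's actual code; the rendered qualifiers may differ slightly):

package main

import (
	"fmt"

	"github.com/dave/jennifer/jen"
)

func main() {
	// Compose []validator.Set{setvalidator.SizeAtMost(1)} the way
	// valValidatorSet and valSizeAtMost do together.
	v := jen.Index().Qual(
		"github.com/hashicorp/terraform-plugin-framework/schema/validator", "Set",
	).Values(
		jen.Qual(
			"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator", "SizeAtMost",
		).Call(jen.Lit(1)),
	)
	// %#v renders the statement as Go source, e.g.
	// []validator.Set{setvalidator.SizeAtMost(1)}
	fmt.Printf("%#v\n", v)
}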
+func valValidatorSet(c ...jen.Code) *jen.Statement { + return jen.Index().Qual(importValidator, "Set").Values(c...) } func ifErr() *jen.Statement { From 7440f643a84dcd9fe4fa0dd4e37e5d72d9242432 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 17:35:39 -0400 Subject: [PATCH 07/27] build(deps): bump stefanzweifel/git-auto-commit-action from 4 to 5 (#1385) --- .github/workflows/userconfig-generate-schema.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/userconfig-generate-schema.yml b/.github/workflows/userconfig-generate-schema.yml index fd52b9347..8cbff63be 100644 --- a/.github/workflows/userconfig-generate-schema.yml +++ b/.github/workflows/userconfig-generate-schema.yml @@ -21,6 +21,6 @@ jobs: run: go install golang.org/x/tools/cmd/goimports@latest - name: generate run: make generate - - uses: stefanzweifel/git-auto-commit-action@v4 + - uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: "chore(userconfig): generate schema" From 7125e02e6785a627c37c4f44ab6015484b1f6fb5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 09:54:08 +0300 Subject: [PATCH 08/27] build(deps): bump golang.org/x/net from 0.13.0 to 0.17.0 (#1389) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 16 ++++++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 041e9f73e..763706dfa 100644 --- a/go.mod +++ b/go.mod @@ -95,11 +95,11 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/zclconf/go-cty v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.13.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.13.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 283ea751e..1e476fa4d 100644 --- a/go.sum +++ b/go.sum @@ -589,8 +589,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -683,8 +683,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod 
h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -800,16 +800,16 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From e0732dbc63232ff5da97ca3c14df05acf0b5e96b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 06:58:48 +0000 Subject: [PATCH 09/27] build(deps): bump github.com/google/go-cmp from 0.5.9 to 0.6.0 (#1388) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 763706dfa..95bbcc54d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/gruntwork-io/terratest v0.45.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-framework 
v1.4.0 diff --git a/go.sum b/go.sum index 1e476fa4d..87a80e54f 100644 --- a/go.sum +++ b/go.sum @@ -333,8 +333,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= From 279658000f6b17c74880c603d26df8941047b00f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 10:33:43 +0300 Subject: [PATCH 10/27] build(deps): bump github.com/hashicorp/terraform-plugin-framework from 1.4.0 to 1.4.1 (#1387) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 95bbcc54d..1692427dd 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/gruntwork-io/terratest v0.45.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 - github.com/hashicorp/terraform-plugin-framework v1.4.0 + github.com/hashicorp/terraform-plugin-framework v1.4.1 github.com/hashicorp/terraform-plugin-go v0.19.0 github.com/hashicorp/terraform-plugin-mux v0.12.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 diff --git a/go.sum b/go.sum index 87a80e54f..0b0902301 100644 --- a/go.sum +++ b/go.sum @@ -423,8 +423,8 @@ github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81Sp github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= -github.com/hashicorp/terraform-plugin-framework v1.4.0 h1:WKbtCRtNrjsh10eA7NZvC/Qyr7zp77j+D21aDO5th9c= -github.com/hashicorp/terraform-plugin-framework v1.4.0/go.mod h1:XC0hPcQbBvlbxwmjxuV/8sn8SbZRg4XwGMs22f+kqV0= +github.com/hashicorp/terraform-plugin-framework v1.4.1 h1:ZC29MoB3Nbov6axHdgPbMz7799pT5H8kIrM8YAsaVrs= +github.com/hashicorp/terraform-plugin-framework v1.4.1/go.mod h1:XC0hPcQbBvlbxwmjxuV/8sn8SbZRg4XwGMs22f+kqV0= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= From c49c95c6c58778af148f85bdab9590b1641d8d55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Oct 2023 07:48:12 +0000 Subject: 
[PATCH 11/27] build(deps): bump github.com/gruntwork-io/terratest from 0.45.0 to 0.46.0 (#1386) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ivan Savciuc --- go.mod | 3 ++- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1692427dd..c1d85b45a 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,9 @@ require ( github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 + com/gruntwork-io/terratest v0.46.0 github.com/google/go-cmp v0.6.0 - github.com/gruntwork-io/terratest v0.45.0 + github.com/gruntwork-io/terratest v0.46.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-framework v1.4.1 github.com/hashicorp/terraform-plugin-go v0.19.0 diff --git a/go.sum b/go.sum index 0b0902301..8162e34c7 100644 --- a/go.sum +++ b/go.sum @@ -379,8 +379,8 @@ github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6c github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/gruntwork-io/terratest v0.45.0 h1:02VuyLRmqOO45TaTH4P4mc44S18er5Rn4CooTUY0uek= -github.com/gruntwork-io/terratest v0.45.0/go.mod h1:4TWB5SYgATxJFfg+RNpE0gwiUWxtfMLGOXo5gwcGgMs= +github.com/gruntwork-io/terratest v0.46.0 h1:ezeJ045eOniWO+0T78SFQrVo6tIJ2or/DBtYvJnweOs= +github.com/gruntwork-io/terratest v0.46.0/go.mod h1:4TWB5SYgATxJFfg+RNpE0gwiUWxtfMLGOXo5gwcGgMs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From a383992be4251d7cdf7fbec5b4eb95f6904b5728 Mon Sep 17 00:00:00 2001 From: Ivan Savciuc Date: Fri, 13 Oct 2023 12:40:19 +0300 Subject: [PATCH 12/27] fix(go.mod): fix broken dependency (#1391) --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index c1d85b45a..a44e9c9c0 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 - com/gruntwork-io/terratest v0.46.0 github.com/google/go-cmp v0.6.0 github.com/gruntwork-io/terratest v0.46.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 From 58b884da986952a00c00ca1e690d38601ee9bde0 Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Mon, 16 Oct 2023 10:13:22 +0200 Subject: [PATCH 13/27] fix: user config marshal error omitted (#1392) --- .../service_integration_resource.go | 24 +++--- .../service/serviceintegration/userconfig.go | 81 +++++++++---------- .../clickhousekafka/clickhouse_kafka_test.go | 26 +++--- .../clickhouse_postgresql_test.go | 26 +++--- .../integration/datadog/datadog_test.go | 26 +++--- .../external_aws_cloudwatch_metrics_test.go | 26 +++--- .../kafkaconnect/kafka_connect_test.go | 26 +++--- .../integration/kafkalogs/kafka_logs_test.go | 26 +++--- .../kafka_mirrormaker_test.go | 26 +++--- .../userconfig/integration/logs/logs_test.go | 26 +++--- .../integration/metrics/metrics_test.go | 26 +++--- internal/schemautil/plugin.go | 15 ++-- .../dist/integration_endpoint_types.go | 3 +- .../userconfig/dist/integration_types.go | 3 +- 
.../userconfig/dist/service_types.go | 3 +- ucgenerator/tests.go | 8 +- 16 files changed, 184 insertions(+), 187 deletions(-) diff --git a/internal/plugin/service/serviceintegration/service_integration_resource.go b/internal/plugin/service/serviceintegration/service_integration_resource.go index 60a737bd3..6eb8883a9 100644 --- a/internal/plugin/service/serviceintegration/service_integration_resource.go +++ b/internal/plugin/service/serviceintegration/service_integration_resource.go @@ -161,7 +161,7 @@ func (s *serviceIntegrationResource) Create(ctx context.Context, req resource.Cr // all other integrations should be imported using `terraform import` if o.IntegrationType.ValueString() == readReplicaType { if preexisting, err := getSIByName(ctx, s.client, &o); err != nil { - resp.Diagnostics.AddError("unable to search for possible preexisting 'read_replica' service integration", err.Error()) + resp.Diagnostics.AddError("Unable to search for possible preexisting 'read_replica' service integration", err.Error()) return } else if preexisting != nil { o.IntegrationID = types.StringValue(preexisting.ServiceIntegrationID) @@ -170,11 +170,11 @@ func (s *serviceIntegrationResource) Create(ctx context.Context, req resource.Cr } } - userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, true) - if err != nil { - resp.Diagnostics.AddError("Failed to expand user config", err.Error()) + userConfig := expandUserConfig(ctx, &resp.Diagnostics, &o, true) + if resp.Diagnostics.HasError() { return } + createReq := aiven.CreateServiceIntegrationRequest{ DestinationProject: getProjectPointer(o.DestinationEndpointID.ValueString()), DestinationEndpointID: getEndpointIDPointer(o.DestinationEndpointID.ValueString()), @@ -206,27 +206,27 @@ func (s *serviceIntegrationResource) Read(ctx context.Context, req resource.Read } func (s *serviceIntegrationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var state resourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + var o resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...) if resp.Diagnostics.HasError() { return } - var o resourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...) + // We read state to get integration's ID + var state resourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
if resp.Diagnostics.HasError() { return } // Copies ID from the state o.IntegrationID = state.IntegrationID - userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, false) - if err != nil { - resp.Diagnostics.AddError("Failed to expand user config", err.Error()) + userConfig := expandUserConfig(ctx, &resp.Diagnostics, &o, false) + if resp.Diagnostics.HasError() { return } - _, err = s.client.ServiceIntegrations.Update( + _, err := s.client.ServiceIntegrations.Update( ctx, state.Project.ValueString(), state.IntegrationID.ValueString(), diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go index d398f18ba..70f3bced8 100644 --- a/internal/plugin/service/serviceintegration/userconfig.go +++ b/internal/plugin/service/serviceintegration/userconfig.go @@ -5,7 +5,6 @@ import ( "github.com/aiven/aiven-go-client/v2" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/types" "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" @@ -74,63 +73,63 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resource // We set user config from Aiven only if it's been set in TF // Otherwise it will produce invalid "after apply" switch { - case isSet(o.ClickhouseKafkaUserConfig): + case schemautil.HasValue(o.ClickhouseKafkaUserConfig): o.ClickhouseKafkaUserConfig = clickhousekafka.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.ClickhousePostgresqlUserConfig): + case schemautil.HasValue(o.ClickhousePostgresqlUserConfig): o.ClickhousePostgresqlUserConfig = clickhousepostgresql.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.DatadogUserConfig): + case schemautil.HasValue(o.DatadogUserConfig): o.DatadogUserConfig = datadog.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.ExternalAwsCloudwatchMetricsUserConfig): + case schemautil.HasValue(o.ExternalAwsCloudwatchMetricsUserConfig): o.ExternalAwsCloudwatchMetricsUserConfig = externalawscloudwatchmetrics.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.KafkaConnectUserConfig): + case schemautil.HasValue(o.KafkaConnectUserConfig): o.KafkaConnectUserConfig = kafkaconnect.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.KafkaLogsUserConfig): + case schemautil.HasValue(o.KafkaLogsUserConfig): o.KafkaLogsUserConfig = kafkalogs.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.KafkaMirrormakerUserConfig): + case schemautil.HasValue(o.KafkaMirrormakerUserConfig): o.KafkaMirrormakerUserConfig = kafkamirrormaker.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.LogsUserConfig): + case schemautil.HasValue(o.LogsUserConfig): o.LogsUserConfig = logs.Flatten(ctx, diags, dto.UserConfig) - case isSet(o.MetricsUserConfig): + case schemautil.HasValue(o.MetricsUserConfig): o.MetricsUserConfig = metrics.Flatten(ctx, diags, dto.UserConfig) } } // expandUserConfig from terraform to aiven -func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, create bool) (map[string]any, error) { - var marshal func(any) (map[string]any, error) - if create { - marshal = schemautil.MarshalCreateUserConfig - } else { - marshal = schemautil.MarshalUpdateUserConfig - } +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, create bool) map[string]any { + var config any - // If invalid integration type is set + // If 
an invalid integration type is set // This will send wrong config to Aiven // Which is sort of a validation too switch { - case isSet(o.ClickhouseKafkaUserConfig): - return marshal(clickhousekafka.Expand(ctx, diags, o.ClickhouseKafkaUserConfig)) - case isSet(o.ClickhousePostgresqlUserConfig): - return marshal(clickhousepostgresql.Expand(ctx, diags, o.ClickhousePostgresqlUserConfig)) - case isSet(o.DatadogUserConfig): - return marshal(datadog.Expand(ctx, diags, o.DatadogUserConfig)) - case isSet(o.ExternalAwsCloudwatchMetricsUserConfig): - return marshal(externalawscloudwatchmetrics.Expand(ctx, diags, o.ExternalAwsCloudwatchMetricsUserConfig)) - case isSet(o.KafkaConnectUserConfig): - return marshal(kafkaconnect.Expand(ctx, diags, o.KafkaConnectUserConfig)) - case isSet(o.KafkaLogsUserConfig): - return marshal(kafkalogs.Expand(ctx, diags, o.KafkaLogsUserConfig)) - case isSet(o.KafkaMirrormakerUserConfig): - return marshal(kafkamirrormaker.Expand(ctx, diags, o.KafkaMirrormakerUserConfig)) - case isSet(o.LogsUserConfig): - return marshal(logs.Expand(ctx, diags, o.LogsUserConfig)) - case isSet(o.MetricsUserConfig): - return marshal(metrics.Expand(ctx, diags, o.MetricsUserConfig)) - default: - return nil, nil + case schemautil.HasValue(o.ClickhouseKafkaUserConfig): + config = clickhousekafka.Expand(ctx, diags, o.ClickhouseKafkaUserConfig) + case schemautil.HasValue(o.ClickhousePostgresqlUserConfig): + config = clickhousepostgresql.Expand(ctx, diags, o.ClickhousePostgresqlUserConfig) + case schemautil.HasValue(o.DatadogUserConfig): + config = datadog.Expand(ctx, diags, o.DatadogUserConfig) + case schemautil.HasValue(o.ExternalAwsCloudwatchMetricsUserConfig): + config = externalawscloudwatchmetrics.Expand(ctx, diags, o.ExternalAwsCloudwatchMetricsUserConfig) + case schemautil.HasValue(o.KafkaConnectUserConfig): + config = kafkaconnect.Expand(ctx, diags, o.KafkaConnectUserConfig) + case schemautil.HasValue(o.KafkaLogsUserConfig): + config = kafkalogs.Expand(ctx, diags, o.KafkaLogsUserConfig) + case schemautil.HasValue(o.KafkaMirrormakerUserConfig): + config = kafkamirrormaker.Expand(ctx, diags, o.KafkaMirrormakerUserConfig) + case schemautil.HasValue(o.LogsUserConfig): + config = logs.Expand(ctx, diags, o.LogsUserConfig) + case schemautil.HasValue(o.MetricsUserConfig): + config = metrics.Expand(ctx, diags, o.MetricsUserConfig) } -} -func isSet(o types.Set) bool { - return !(o.IsUnknown() || o.IsNull()) + if diags.HasError() { + return nil + } + + dict, err := schemautil.MarshalUserConfig(config, create) + if err != nil { + diags.AddError("Failed to expand user config", err.Error()) + return nil + } + return dict } diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go index 77316cfac..0f1a9a6ee 100644 --- a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go @@ -73,22 +73,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - 
name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -109,7 +109,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go index 217dea78b..330c56bc1 100644 --- a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go @@ -33,22 +33,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -69,7 +69,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go index 7dc27e200..04190b0f4 100644 --- a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go +++ b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go @@ -83,22 +83,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -119,7 +119,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) 
require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go index 9cf794599..9795bc385 100644 --- a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go @@ -45,22 +45,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -81,7 +81,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go index 964993b87..69d9ae0cf 100644 --- a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go @@ -33,22 +33,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -69,7 +69,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go index 03a1c9ecc..166dd35a6 100644 --- a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go +++ 
b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go @@ -29,22 +29,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -65,7 +65,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go index 1e269a0d6..611545286 100644 --- a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go +++ b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go @@ -39,22 +39,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -75,7 +75,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/logs/logs_test.go b/internal/plugin/service/userconfig/integration/logs/logs_test.go index 9635dcc31..bab47414f 100644 --- a/internal/plugin/service/userconfig/integration/logs/logs_test.go +++ b/internal/plugin/service/userconfig/integration/logs/logs_test.go @@ -31,22 +31,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", 
- source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -67,7 +67,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go index 07c3e6b2f..f20d69dd9 100644 --- a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go +++ b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go @@ -65,22 +65,22 @@ const updateOnlyFields = `{ func TestUserConfig(t *testing.T) { cases := []struct { - name string - source string - expect string - marshal func(any) (map[string]any, error) + name string + source string + expect string + create bool }{ { - name: "fields to create resource", - source: allFields, - expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + name: "fields to create resource", + source: allFields, + expect: allFields, + create: true, }, { - name: "only fields to update resource", - source: allFields, - expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + create: false, }, } @@ -101,7 +101,7 @@ func TestUserConfig(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input diff --git a/internal/schemautil/plugin.go b/internal/schemautil/plugin.go index 6f94a84e5..738624803 100644 --- a/internal/schemautil/plugin.go +++ b/internal/schemautil/plugin.go @@ -98,13 +98,10 @@ func marshalUserConfig(c any, groups ...string) (map[string]any, error) { return m, nil } -// MarshalCreateUserConfig returns marshaled user config for Create operation -func MarshalCreateUserConfig(c any) (map[string]any, error) { - return marshalUserConfig(c, "create", "update") -} - -// MarshalUpdateUserConfig returns marshaled user config for Update operation -func MarshalUpdateUserConfig(c any) (map[string]any, error) { +func MarshalUserConfig(c any, create bool) (map[string]any, error) { + if create { + return marshalUserConfig(c, "create", "update") + } return marshalUserConfig(c, "update") } @@ -151,3 +148,7 @@ func ValueFloat64Pointer(v types.Float64) *float64 { } return v.ValueFloat64Pointer() } + +func HasValue(o types.Set) bool { + return !(o.IsUnknown() || o.IsNull()) +} diff --git a/internal/schemautil/userconfig/dist/integration_endpoint_types.go b/internal/schemautil/userconfig/dist/integration_endpoint_types.go index 4369fec7f..55989ada9 100644 --- a/internal/schemautil/userconfig/dist/integration_endpoint_types.go +++ b/internal/schemautil/userconfig/dist/integration_endpoint_types.go @@ -3,9 +3,8 @@ package dist import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" + schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // IntegrationEndpointTypeDatadog is a generated function returning the schema of the datadog IntegrationEndpointType. diff --git a/internal/schemautil/userconfig/dist/integration_types.go b/internal/schemautil/userconfig/dist/integration_types.go index de191b448..66b10740f 100644 --- a/internal/schemautil/userconfig/dist/integration_types.go +++ b/internal/schemautil/userconfig/dist/integration_types.go @@ -3,9 +3,8 @@ package dist import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" + schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // IntegrationTypeClickhouseKafka is a generated function returning the schema of the clickhouse_kafka IntegrationType. diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index 337cbd298..cb772a8da 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -3,9 +3,8 @@ package dist import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" + schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // ServiceTypeCassandra is a generated function returning the schema of the cassandra ServiceType. diff --git a/ucgenerator/tests.go b/ucgenerator/tests.go index fe63ec908..2cffe73e5 100644 --- a/ucgenerator/tests.go +++ b/ucgenerator/tests.go @@ -104,19 +104,19 @@ func Test%[3]s(t *testing.T) { name string source string expect string - marshal func (any) (map[string]any, error) + create bool }{ { name: "fields to create resource", source: allFields, expect: allFields, - marshal: schemautil.MarshalCreateUserConfig, + create: true, }, { name: "only fields to update resource", source: allFields, expect: updateOnlyFields, // usually, fewer fields - marshal: schemautil.MarshalUpdateUserConfig, + create: false, }, } @@ -137,7 +137,7 @@ func Test%[3]s(t *testing.T) { require.Empty(t, diags) // Run specific marshal (create or update resource) - dtoConfig, err := opt.marshal(config) + dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create) require.NoError(t, err) // Compares that output is strictly equal to the input From f25ad3e5549cec5820db9e93cc526d8c026f3a8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 00:43:08 +0000 Subject: [PATCH 14/27] build(deps): bump github.com/aiven/go-api-schemas from 1.35.0 to 1.36.0 (#1394) --- go.mod | 2 +- go.sum | 4 ++-- .../schemautil/userconfig/dist/integration_endpoint_types.go | 3 ++- internal/schemautil/userconfig/dist/integration_types.go | 3 ++- internal/schemautil/userconfig/dist/service_types.go | 3 ++- 5 files changed, 9 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index a44e9c9c0..da817a758 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/storage v1.28.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/aiven/go-api-schemas v1.35.0 + github.com/aiven/go-api-schemas v1.36.0 github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/go.sum b/go.sum index 8162e34c7..ec585faac 100644 --- 
a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/aiven/aiven-go-client/v2 v2.1.0 h1:n8k34HpEQ7KgxRcyX/F5WKR6xh8MSAM6TtPHnghDNGg= github.com/aiven/aiven-go-client/v2 v2.1.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= -github.com/aiven/go-api-schemas v1.35.0 h1:hNimpMWeFjU44AmiDzMWo8Hzimg+uoe4GPpoL3HGOi8= -github.com/aiven/go-api-schemas v1.35.0/go.mod h1:RmQ8MfxwxAP2ji9eJtP6dICOaTMcQD9b5aQT3Bp7uzI= +github.com/aiven/go-api-schemas v1.36.0 h1:v74EatWotTayXaVWLgx26S1IikGCkmRsnXDrt3yHYes= +github.com/aiven/go-api-schemas v1.36.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= diff --git a/internal/schemautil/userconfig/dist/integration_endpoint_types.go b/internal/schemautil/userconfig/dist/integration_endpoint_types.go index 55989ada9..4369fec7f 100644 --- a/internal/schemautil/userconfig/dist/integration_endpoint_types.go +++ b/internal/schemautil/userconfig/dist/integration_endpoint_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationEndpointTypeDatadog is a generated function returning the schema of the datadog IntegrationEndpointType. diff --git a/internal/schemautil/userconfig/dist/integration_types.go b/internal/schemautil/userconfig/dist/integration_types.go index 66b10740f..de191b448 100644 --- a/internal/schemautil/userconfig/dist/integration_types.go +++ b/internal/schemautil/userconfig/dist/integration_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // IntegrationTypeClickhouseKafka is a generated function returning the schema of the clickhouse_kafka IntegrationType. diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index cb772a8da..337cbd298 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -3,8 +3,9 @@ package dist import ( - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) // ServiceTypeCassandra is a generated function returning the schema of the cassandra ServiceType. 
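For readers following the MarshalUserConfig consolidation in PATCH 13 above, a minimal self-contained sketch of the idea follows. The `groups` struct tag and the demo struct are illustrative assumptions; the provider's actual filtering mechanism is not shown in these diffs.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

// marshalUserConfig keeps only the struct fields whose "groups" tag
// intersects the requested groups, keyed by their json tag name.
func marshalUserConfig(c any, groups ...string) (map[string]any, error) {
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Struct {
		return nil, fmt.Errorf("expected a struct, got %s", v.Kind())
	}
	m := make(map[string]any)
	for i := 0; i < v.NumField(); i++ {
		f := v.Type().Field(i)
		tagged := strings.Split(f.Tag.Get("groups"), ",")
		for _, g := range groups {
			for _, tag := range tagged {
				if g == tag {
					name := strings.Split(f.Tag.Get("json"), ",")[0]
					m[name] = v.Field(i).Interface()
				}
			}
		}
	}
	return m, nil
}

// MarshalUserConfig mirrors the consolidated entrypoint: create marshals
// fields tagged for create or update, while update keeps update-only fields.
func MarshalUserConfig(c any, create bool) (map[string]any, error) {
	if create {
		return marshalUserConfig(c, "create", "update")
	}
	return marshalUserConfig(c, "update")
}

type demoUserConfig struct {
	DatabaseName string `json:"database_name" groups:"create"`
	Enabled      bool   `json:"enabled" groups:"create,update"`
}

func main() {
	cfg := demoUserConfig{DatabaseName: "defaultdb", Enabled: true}
	forCreate, _ := MarshalUserConfig(cfg, true)
	forUpdate, _ := MarshalUserConfig(cfg, false)
	c, _ := json.Marshal(forCreate)
	u, _ := json.Marshal(forUpdate)
	fmt.Println(string(c)) // {"database_name":"defaultdb","enabled":true}
	fmt.Println(string(u)) // {"enabled":true}
}

Run as-is, the create call emits both fields while the update call drops the create-only one, which matches the allFields/updateOnlyFields pairs exercised by the generated tests in that patch.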
From c464bfd9be0bb8eb39d77dc3d767f08e8adaa05f Mon Sep 17 00:00:00 2001 From: Aleksander Zaruczewski Date: Mon, 16 Oct 2023 23:24:47 -0700 Subject: [PATCH 15/27] test(opensearch): fix weak password error (#1395) --- .../opensearch_security_plugin_config_test.go | 13 +++++-------- .../service/opensearch/opensearch_user_test.go | 15 +++++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/sdkprovider/service/opensearch/opensearch_security_plugin_config_test.go b/internal/sdkprovider/service/opensearch/opensearch_security_plugin_config_test.go index 2c3b58d95..8600d661b 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_security_plugin_config_test.go +++ b/internal/sdkprovider/service/opensearch/opensearch_security_plugin_config_test.go @@ -13,9 +13,6 @@ import ( acc "github.com/aiven/terraform-provider-aiven/internal/acctest" ) -// openSearchSecurityPluginTestPassword is the password used for the OpenSearch Security Plugin Config tests. -const openSearchSecurityPluginTestPassword = "ThisIsATest123^=^" - // TestAccAivenOpenSearchSecurityPluginConfig_basic tests the basic functionality of the OpenSearch Security Plugin // Config resource. func TestAccAivenOpenSearchSecurityPluginConfig_basic(t *testing.T) { @@ -53,14 +50,14 @@ resource "aiven_opensearch_security_plugin_config" "foo" { admin_password = "%s" depends_on = [aiven_opensearch.bar, aiven_opensearch_user.foo] -}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchSecurityPluginTestPassword), +}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchTestPassword), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")), resource.TestCheckResourceAttr( resourceName, "service_name", fmt.Sprintf("test-acc-sr-os-sec-plugin-%s", rName), ), resource.TestCheckResourceAttr( - resourceName, "admin_password", openSearchSecurityPluginTestPassword, + resourceName, "admin_password", openSearchTestPassword, ), resource.TestCheckResourceAttr(resourceName, "available", "true"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), @@ -88,14 +85,14 @@ resource "aiven_opensearch_security_plugin_config" "foo" { admin_password = "%s" depends_on = [aiven_opensearch.bar] -}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchSecurityPluginTestPassword), +}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchTestPassword), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")), resource.TestCheckResourceAttr( resourceName, "service_name", fmt.Sprintf("test-acc-sr-os-sec-plugin-%s", rName), ), resource.TestCheckResourceAttr( - resourceName, "admin_password", openSearchSecurityPluginTestPassword, + resourceName, "admin_password", openSearchTestPassword, ), resource.TestCheckResourceAttr(resourceName, "available", "true"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), @@ -129,7 +126,7 @@ resource "aiven_opensearch_security_plugin_config" "foo" { admin_password = "%s" depends_on = [aiven_opensearch.bar, aiven_opensearch_user.foo] -}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchSecurityPluginTestPassword), +}`, os.Getenv("AIVEN_PROJECT_NAME"), rName, openSearchTestPassword), ExpectError: regexp.MustCompile("when the OpenSearch Security Plugin is enabled"), }, }, diff --git a/internal/sdkprovider/service/opensearch/opensearch_user_test.go b/internal/sdkprovider/service/opensearch/opensearch_user_test.go index 87da3b0c4..bcad62731 100644 --- 
a/internal/sdkprovider/service/opensearch/opensearch_user_test.go +++ b/internal/sdkprovider/service/opensearch/opensearch_user_test.go @@ -15,6 +15,9 @@ import ( "github.com/aiven/terraform-provider-aiven/internal/schemautil" ) +// openSearchTestPassword is the password used for the OpenSearch tests. +const openSearchTestPassword = "ThisIsATest123^=^" + func TestAccAivenOpenSearchUser_basic(t *testing.T) { resourceName := "aiven_opensearch_user.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -31,7 +34,7 @@ func TestAccAivenOpenSearchUser_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)), resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")), resource.TestCheckResourceAttr(resourceName, "username", fmt.Sprintf("user-%s", rName)), - resource.TestCheckResourceAttr(resourceName, "password", "Test$1234"), + resource.TestCheckResourceAttr(resourceName, "password", openSearchTestPassword), ), }, }, @@ -72,14 +75,14 @@ func testAccCheckAivenOpenSearchUserResourceDestroy(s *terraform.State) error { func testAccOpenSearchUserResource(name string) string { return fmt.Sprintf(` data "aiven_project" "foo" { - project = "%s" + project = "%[1]s" } resource "aiven_opensearch" "bar" { project = data.aiven_project.foo.project cloud_name = "google-europe-west1" plan = "startup-4" - service_name = "test-acc-sr-%s" + service_name = "test-acc-sr-%[2]s" maintenance_window_dow = "monday" maintenance_window_time = "10:00:00" } @@ -87,13 +90,13 @@ resource "aiven_opensearch" "bar" { resource "aiven_opensearch_user" "foo" { service_name = aiven_opensearch.bar.service_name project = data.aiven_project.foo.project - username = "user-%s" - password = "Test$1234" + username = "user-%[2]s" + password = "%[3]s" } data "aiven_opensearch_user" "user" { service_name = aiven_opensearch_user.foo.service_name project = aiven_opensearch_user.foo.project username = aiven_opensearch_user.foo.username -}`, os.Getenv("AIVEN_PROJECT_NAME"), name, name) +}`, os.Getenv("AIVEN_PROJECT_NAME"), name, openSearchTestPassword) } From a733986b1d6ebda5c55ae12479d4d34aec464362 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Oct 2023 00:52:37 +0000 Subject: [PATCH 16/27] build(deps): bump github.com/aiven/go-api-schemas from 1.36.0 to 1.37.0 (#1396) --- docs/data-sources/kafka.md | 4 ++ docs/resources/kafka.md | 4 ++ go.mod | 2 +- go.sum | 4 +- .../userconfig/apiconvert/fromapi_test.go | 4 ++ .../userconfig/dist/service_types.go | 40 +++++++++++++++++++ 6 files changed, 55 insertions(+), 3 deletions(-) diff --git a/docs/data-sources/kafka.md b/docs/data-sources/kafka.md index 0d56c4253..f2c8b3a45 100644 --- a/docs/data-sources/kafka.md +++ b/docs/data-sources/kafka.md @@ -162,6 +162,10 @@ Read-Only: - `producer_purgatory_purge_interval_requests` (Number) - `replica_fetch_max_bytes` (Number) - `replica_fetch_response_max_bytes` (Number) +- `sasl_oauthbearer_expected_audience` (String) +- `sasl_oauthbearer_expected_issuer` (String) +- `sasl_oauthbearer_jwks_endpoint_url` (String) +- `sasl_oauthbearer_sub_claim_name` (String) - `socket_request_max_bytes` (Number) - `transaction_remove_expired_transaction_cleanup_interval_ms` (Number) - `transaction_state_log_segment_bytes` (Number) diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md index 5089332d5..4ad8b2c34 100644 --- a/docs/resources/kafka.md +++ b/docs/resources/kafka.md @@ -164,6 
+164,10 @@ Optional: - `producer_purgatory_purge_interval_requests` (Number) The purge interval (in number of requests) of the producer request purgatory(defaults to 1000). - `replica_fetch_max_bytes` (Number) The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. - `replica_fetch_response_max_bytes` (Number) Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. +- `sasl_oauthbearer_expected_audience` (String) The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. +- `sasl_oauthbearer_expected_issuer` (String) Optional setting for the broker to use to verify that the JWT was created by the expected issuer. +- `sasl_oauthbearer_jwks_endpoint_url` (String) OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. . +- `sasl_oauthbearer_sub_claim_name` (String) Name of the scope from which to extract the subject claim from the JWT. Defaults to sub. - `socket_request_max_bytes` (Number) The maximum number of bytes in a socket request (defaults to 104857600). - `transaction_remove_expired_transaction_cleanup_interval_ms` (Number) The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)). - `transaction_state_log_segment_bytes` (Number) The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)). 
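The new SASL OAuth2/OIDC options above live under the service's kafka_user_config.kafka block. A rough illustration in the acceptance-test fixture style used elsewhere in this series; the plan, cloud, endpoint URLs, and audience values are placeholders, not values from this patch:

package kafka_test

import (
	"fmt"
	"os"
)

// testAccKafkaSaslOauthbearerResource is a hypothetical fixture showing the
// new broker options; setting sasl_oauthbearer_jwks_endpoint_url is what
// enables SASL SSL OAuth2/OIDC authentication.
func testAccKafkaSaslOauthbearerResource(name string) string {
	return fmt.Sprintf(`
resource "aiven_kafka" "bar" {
  project      = "%[1]s"
  cloud_name   = "google-europe-west1"
  plan         = "business-4"
  service_name = "test-acc-kafka-%[2]s"

  kafka_user_config {
    kafka {
      sasl_oauthbearer_jwks_endpoint_url = "https://idp.example.com/.well-known/jwks.json"
      sasl_oauthbearer_expected_issuer   = "https://idp.example.com"
      sasl_oauthbearer_expected_audience = "kafka-clients"
      sasl_oauthbearer_sub_claim_name    = "sub"
    }
  }
}`, os.Getenv("AIVEN_PROJECT_NAME"), name)
}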
diff --git a/go.mod b/go.mod index da817a758..41fa9bee5 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/storage v1.28.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/aiven/go-api-schemas v1.36.0 + github.com/aiven/go-api-schemas v1.37.0 github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/go.sum b/go.sum index ec585faac..fe20dd25f 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/aiven/aiven-go-client/v2 v2.1.0 h1:n8k34HpEQ7KgxRcyX/F5WKR6xh8MSAM6TtPHnghDNGg= github.com/aiven/aiven-go-client/v2 v2.1.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= -github.com/aiven/go-api-schemas v1.36.0 h1:v74EatWotTayXaVWLgx26S1IikGCkmRsnXDrt3yHYes= -github.com/aiven/go-api-schemas v1.36.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= +github.com/aiven/go-api-schemas v1.37.0 h1:XkJudUJFMd5Ox35iIB9kw5I9jpAGReSHpuSFCOYFjig= +github.com/aiven/go-api-schemas v1.37.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= diff --git a/internal/schemautil/userconfig/apiconvert/fromapi_test.go b/internal/schemautil/userconfig/apiconvert/fromapi_test.go index e9af74654..ab051ab8b 100644 --- a/internal/schemautil/userconfig/apiconvert/fromapi_test.go +++ b/internal/schemautil/userconfig/apiconvert/fromapi_test.go @@ -132,6 +132,10 @@ func TestFromAPI(t *testing.T) { "producer_purgatory_purge_interval_requests": 0, "replica_fetch_max_bytes": 0, "replica_fetch_response_max_bytes": 0, + "sasl_oauthbearer_expected_audience": "", + "sasl_oauthbearer_expected_issuer": "", + "sasl_oauthbearer_jwks_endpoint_url": "", + "sasl_oauthbearer_sub_claim_name": "", "socket_request_max_bytes": 0, "transaction_remove_expired_transaction_cleanup_interval_ms": 0, "transaction_state_log_segment_bytes": 0, diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go index 337cbd298..53bf02569 100644 --- a/internal/schemautil/userconfig/dist/service_types.go +++ b/internal/schemautil/userconfig/dist/service_types.go @@ -2535,6 +2535,26 @@ func ServiceTypeKafka() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "sasl_oauthbearer_expected_audience": { + Description: "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_expected_issuer": { + Description: "Optional setting for the broker to use to verify that the JWT was created by the expected issuer.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_jwks_endpoint_url": { + Description: "OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. 
.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_sub_claim_name": { + Description: "Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.", + Optional: true, + Type: schema.TypeString, + }, "socket_request_max_bytes": { Description: "The maximum number of bytes in a socket request (defaults to 104857600).", Optional: true, @@ -2742,6 +2762,26 @@ func ServiceTypeKafka() *schema.Schema { Optional: true, Type: schema.TypeInt, }, + "sasl_oauthbearer_expected_audience": { + Description: "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_expected_issuer": { + Description: "Optional setting for the broker to use to verify that the JWT was created by the expected issuer.", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_jwks_endpoint_url": { + Description: "OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .", + Optional: true, + Type: schema.TypeString, + }, + "sasl_oauthbearer_sub_claim_name": { + Description: "Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.", + Optional: true, + Type: schema.TypeString, + }, "socket_request_max_bytes": { Description: "The maximum number of bytes in a socket request (defaults to 104857600).", Optional: true, From 387b6ada1d0ea0f439f12a497f4d309f3ac27aaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Oct 2023 22:20:07 +0000 Subject: [PATCH 17/27] build(deps): bump github.com/gruntwork-io/terratest from 0.46.0 to 0.46.1 (#1397) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 41fa9bee5..ddce2846b 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/docker/go-units v0.5.0 github.com/ettle/strcase v0.1.1 github.com/google/go-cmp v0.6.0 - github.com/gruntwork-io/terratest v0.46.0 + github.com/gruntwork-io/terratest v0.46.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-framework v1.4.1 github.com/hashicorp/terraform-plugin-go v0.19.0 diff --git a/go.sum b/go.sum index fe20dd25f..5054ab885 100644 --- a/go.sum +++ b/go.sum @@ -379,8 +379,8 @@ github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6c github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/gruntwork-io/terratest v0.46.0 h1:ezeJ045eOniWO+0T78SFQrVo6tIJ2or/DBtYvJnweOs= -github.com/gruntwork-io/terratest v0.46.0/go.mod h1:4TWB5SYgATxJFfg+RNpE0gwiUWxtfMLGOXo5gwcGgMs= +github.com/gruntwork-io/terratest v0.46.1 h1:dJ/y2/Li6yCDIc8KXY8PfydtrMRiXFb3UZm4LoPShPI= +github.com/gruntwork-io/terratest v0.46.1/go.mod h1:gl//tb5cLnbpQs1FTSNwhsrbhsoG00goCJPfOnyliiU= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From 53012655522af51cf9f39349e6fd995f9c54a6bf Mon Sep 17 
00:00:00 2001
From: Aleksander Zaruczewski
Date: Fri, 20 Oct 2023 04:08:22 -0400
Subject: [PATCH 18/27] test: fix failing billing group test (#1398)

---
 .../sdkprovider/service/project/billing_group_test.go | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/internal/sdkprovider/service/project/billing_group_test.go b/internal/sdkprovider/service/project/billing_group_test.go
index 512844074..dd4f2d78e 100644
--- a/internal/sdkprovider/service/project/billing_group_test.go
+++ b/internal/sdkprovider/service/project/billing_group_test.go
@@ -69,8 +69,12 @@ func testAccCheckAivenBillingGroupResourceDestroy(s *terraform.State) error {

 func testAccBillingGroupResource(name string) string {
 	return fmt.Sprintf(`
+resource "aiven_organization" "foo" {
+  name = "test-acc-org-%[1]s"
+}
+
 resource "aiven_billing_group" "foo" {
-  name = "test-acc-bg-%s"
+  name = "test-acc-bg-%[1]s"
   billing_emails = ["ivan.savciuc+test1@aiven.fi", "ivan.savciuc+test2@aiven.fi"]
 }

@@ -79,11 +83,12 @@ data "aiven_billing_group" "bar" {
 }

 resource "aiven_project" "pr1" {
-  project = "test-acc-pr-%s"
+  project = "test-acc-pr-%[1]s"
   billing_group = aiven_billing_group.foo.id
+  parent_id = aiven_organization.foo.id

   depends_on = [aiven_billing_group.foo]
-}`, name, name)
+}`, name)
 }

 func testCopyBillingGroupFromExistingOne(name string) string {

From ea22abf726c52707f915d527ac8b5dedc5644aab Mon Sep 17 00:00:00 2001
From: Ivan Savciuc
Date: Fri, 20 Oct 2023 17:17:51 +0300
Subject: [PATCH 19/27] chore: deprecating account and project user (#1390)

---
 CHANGELOG.md                                                | 1 +
 internal/sdkprovider/service/account/account_team.go        | 3 ++-
 internal/sdkprovider/service/account/account_team_member.go | 3 ++-
 internal/sdkprovider/service/project/project_user.go        | 3 ++-
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index cc0ee9d69..95fc8b215 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ nav_order: 1

 # Changelog

+- Deprecate `project_user`, `account_team` and `account_team_member` resources
 ## [5.0.0] - YYYY-MM-DD

 - Migrate `aiven_service_integration` to the Plugin Framework

diff --git a/internal/sdkprovider/service/account/account_team.go b/internal/sdkprovider/service/account/account_team.go
index 13c93e285..5a7ecd6b7 100644
--- a/internal/sdkprovider/service/account/account_team.go
+++ b/internal/sdkprovider/service/account/account_team.go
@@ -50,7 +50,8 @@ func ResourceAccountTeam() *schema.Resource {
 		},
 		Timeouts: schemautil.DefaultResourceTimeouts(),

-		Schema: aivenAccountTeamSchema,
+		Schema:             aivenAccountTeamSchema,
+		DeprecationMessage: "This resource is deprecated",
 	}
 }

diff --git a/internal/sdkprovider/service/account/account_team_member.go b/internal/sdkprovider/service/account/account_team_member.go
index 2c26c0faf..0b7a010e5 100644
--- a/internal/sdkprovider/service/account/account_team_member.go
+++ b/internal/sdkprovider/service/account/account_team_member.go
@@ -67,7 +67,8 @@ eliminate an account team member if one has accepted an invitation previously.
}, Timeouts: schemautil.DefaultResourceTimeouts(), - Schema: aivenAccountTeamMemberSchema, + Schema: aivenAccountTeamMemberSchema, + DeprecationMessage: "This resource is deprecated", } } diff --git a/internal/sdkprovider/service/project/project_user.go b/internal/sdkprovider/service/project/project_user.go index 683b0448b..7480f0bb3 100644 --- a/internal/sdkprovider/service/project/project_user.go +++ b/internal/sdkprovider/service/project/project_user.go @@ -44,7 +44,8 @@ func ResourceProjectUser() *schema.Resource { }, Timeouts: schemautil.DefaultResourceTimeouts(), - Schema: aivenProjectUserSchema, + Schema: aivenProjectUserSchema, + DeprecationMessage: "This resource is deprecated", } } From a5c93f7cf044fd2a924e10fba79919d389a931db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 22 Oct 2023 21:05:30 +0000 Subject: [PATCH 20/27] build(deps): bump github.com/aiven/go-api-schemas from 1.37.0 to 1.38.0 (#1399) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ddce2846b..079de931b 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/storage v1.28.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/aiven/go-api-schemas v1.37.0 + github.com/aiven/go-api-schemas v1.38.0 github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/go.sum b/go.sum index 5054ab885..1f66c7661 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/aiven/aiven-go-client/v2 v2.1.0 h1:n8k34HpEQ7KgxRcyX/F5WKR6xh8MSAM6TtPHnghDNGg= github.com/aiven/aiven-go-client/v2 v2.1.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= -github.com/aiven/go-api-schemas v1.37.0 h1:XkJudUJFMd5Ox35iIB9kw5I9jpAGReSHpuSFCOYFjig= -github.com/aiven/go-api-schemas v1.37.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= +github.com/aiven/go-api-schemas v1.38.0 h1:l+OOMYRG1tddTFQN4OihE6+sxVdP5fKeRN/9uoKFnkk= +github.com/aiven/go-api-schemas v1.38.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= From 3add68a29526aa99551c4f85393e97382e62562d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 22 Oct 2023 17:12:01 -0400 Subject: [PATCH 21/27] build(deps): bump github.com/golangci/golangci-lint from 1.54.1 to 1.55.0 in /tools (#1403) --- tools/go.mod | 74 ++++++++++++----------- tools/go.sum | 162 +++++++++++++++++++++++++++------------------------ 2 files changed, 127 insertions(+), 109 deletions(-) diff --git a/tools/go.mod b/tools/go.mod index 2c788399b..4231d0375 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -3,7 +3,7 @@ module github.com/aiven/terraform-provider-aiven/tools go 1.19 require ( - github.com/golangci/golangci-lint v1.54.1 + github.com/golangci/golangci-lint v1.55.0 github.com/hashicorp/terraform-plugin-docs v0.16.0 
github.com/katbyte/terrafmt v0.5.2 ) @@ -11,10 +11,11 @@ require ( require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/4meepo/tagalign v1.3.2 // indirect - github.com/Abirdcfly/dupword v0.0.12 // indirect - github.com/Antonboom/errname v0.1.10 // indirect - github.com/Antonboom/nilnil v0.1.5 // indirect + github.com/4meepo/tagalign v1.3.3 // indirect + github.com/Abirdcfly/dupword v0.0.13 // indirect + github.com/Antonboom/errname v0.1.12 // indirect + github.com/Antonboom/nilnil v0.1.7 // indirect + github.com/Antonboom/testifylint v0.2.3 // indirect github.com/BurntSushi/toml v1.3.2 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect @@ -25,6 +26,7 @@ require ( github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/alecthomas/go-check-sumtype v0.1.3 // indirect github.com/alexkohler/nakedret/v2 v2.0.2 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect @@ -37,16 +39,18 @@ require ( github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v3 v3.4.0 // indirect - github.com/breml/bidichk v0.2.4 // indirect - github.com/breml/errchkjson v0.3.1 // indirect - github.com/butuzov/ireturn v0.2.0 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson v0.3.6 // indirect + github.com/butuzov/ireturn v0.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect + github.com/catenacyber/perfsprint v0.2.0 // indirect + github.com/ccojocar/zxcvbn-go v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect - github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect + github.com/chavacava/garif v0.1.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.11.0 // indirect + github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/esimonov/ifshort v1.0.4 // indirect @@ -56,6 +60,7 @@ require ( github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/ghostiam/protogetter v0.2.3 // indirect github.com/go-critic/go-critic v0.9.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -71,15 +76,15 @@ require ( github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect + github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect github.com/golangci/misspell v0.4.1 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect + github.com/golangci/revgrep v0.5.0 // indirect github.com/golangci/unconvert 
v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/gookit/color v1.5.3 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/gookit/color v1.5.4 // indirect github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect @@ -100,7 +105,7 @@ require ( github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jgautheron/goconst v1.6.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/julz/importas v0.1.0 // indirect @@ -116,6 +121,7 @@ require ( github.com/ldez/tagliatelle v0.5.0 // indirect github.com/leonklingele/grouper v1.1.1 // indirect github.com/lufeee/execinquery v1.2.1 // indirect + github.com/macabu/inamedparam v0.1.2 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect @@ -125,7 +131,7 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.3.2 // indirect + github.com/mgechev/revive v1.3.4 // indirect github.com/mitchellh/cli v1.1.5 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -134,14 +140,13 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect github.com/nishanths/exhaustive v0.11.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.13.3 // indirect + github.com/nunnatsa/ginkgolinter v0.14.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.4.3 // indirect + github.com/polyfloyd/go-errorlint v1.4.5 // indirect github.com/posener/complete v1.2.3 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect @@ -154,11 +159,11 @@ require ( github.com/rivo/uniseg v0.4.4 // indirect github.com/russross/blackfriday v1.6.0 // indirect github.com/ryancurrah/gomodguard v1.3.0 // indirect - github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect - github.com/securego/gosec/v2 v2.16.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect + github.com/securego/gosec/v2 v2.18.1 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -180,38 +185,39 @@ require ( github.com/subosito/gotenv v1.4.2 // indirect 
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.11 // indirect + github.com/tetafro/godot v1.4.15 // indirect github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect github.com/timonwong/loggercheck v0.9.4 // indirect github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.7 // indirect - github.com/xen0n/gosmopolitan v1.2.1 // indirect + github.com/uudashr/gocognit v1.1.2 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect github.com/ykadowak/zerologlint v0.1.3 // indirect github.com/zclconf/go-cty v1.13.2 // indirect - gitlab.com/bosi/decorder v0.4.0 // indirect - go.tmz.dev/musttag v0.7.1 // indirect + gitlab.com/bosi/decorder v0.4.1 // indirect + go-simpler.org/sloglint v0.1.2 // indirect + go.tmz.dev/musttag v0.7.2 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.9.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/tools v0.12.0 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.14.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.4.3 // indirect + honnef.co/go/tools v0.4.6 // indirect mvdan.cc/gofumpt v0.5.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect diff --git a/tools/go.sum b/tools/go.sum index 42ac9617d..073cff95c 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -40,14 +40,16 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI= -github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= -github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc= -github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI= -github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls= -github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA= -github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0= -github.com/Antonboom/nilnil v0.1.5/go.mod 
h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk= +github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= +github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= +github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60= +github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -74,6 +76,10 @@ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0g github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= +github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -107,15 +113,19 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= -github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= -github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= -github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= -github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= -github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4= -github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/butuzov/ireturn v0.2.1 
h1:w5Ks4tnfeFDZskGJ2x1GAkx5gaQV+kdU3NKNr3NEBzY= +github.com/butuzov/ireturn v0.2.1/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo= +github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= +github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -123,8 +133,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -138,8 +148,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A= -github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= +github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -168,6 +178,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghostiam/protogetter v0.2.3 
h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw= +github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4= github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -245,18 +257,18 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= -github.com/golangci/golangci-lint v1.54.1 h1:0qMrH1gkeIBqCZaaAm5Fwq4xys9rO/lJofHfZURIFFk= -github.com/golangci/golangci-lint v1.54.1/go.mod h1:JK47+qksV/t2mAz9YvndwT0ZLW4A1rvDljOs3g9jblo= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.55.0 h1:ePpc6YhM1ZV8kHU8dwmHDHAdeedZHdK8cmTXlkkRdi8= +github.com/golangci/golangci-lint v1.55.0/go.mod h1:Z/OawFQ4yqFo2/plDYlIjoZlJeVYkRcqS9dW55p0FXg= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/revgrep v0.5.0 h1:GGBqHFtFOeHiSUQtFVZXPJtVZYOGB4iVlAjaoFRBQvY= +github.com/golangci/revgrep v0.5.0/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -293,13 +305,13 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.5.3 h1:twfIhZs4QLCtimkP7MOxlF3A0U/5cDPseRT9M/+2SCE= -github.com/gookit/color v1.5.3/go.mod h1:NUzwzeehUfl7GIb36pqId+UGmRfQcU/WiiyTTeNjHtE= +github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= +github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= @@ -359,8 +371,8 @@ github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+h github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= +github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= @@ -413,6 +425,8 @@ github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU= +github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= @@ -438,8 +452,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/revive v1.3.2 
h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U= -github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0= +github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc= +github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw= github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -465,20 +479,18 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.13.3 h1:wEvjrzSMfDdnoWkctignX9QTf4rT9f4GkQ3uVoXBmiU= -github.com/nunnatsa/ginkgolinter v0.13.3/go.mod h1:aTKXo8WddENYxNEFT+4ZxEgWXqlD9uMD3w9Bfw/ABEc= +github.com/nunnatsa/ginkgolinter v0.14.0 h1:XQPNmw+kZz5cC/HbFK3mQutpjzAQv1dHregRA+4CGGg= +github.com/nunnatsa/ginkgolinter v0.14.0/go.mod h1:cm2xaqCUCRd7qcP4DqbVvpcyEMkuLM9CF0wY6VASohk= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= @@ -493,8 +505,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.4.3 h1:P6NALOLV8BrWhm6PsqOraUK05E5h8IZnpXYJ+CIg+0U= -github.com/polyfloyd/go-errorlint v1.4.3/go.mod h1:VPlWPh6hB/wruVG803SuNpLuTGNjLHYlvcdSy4RhdPA= 
+github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI= +github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= @@ -542,16 +554,16 @@ github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNl github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= -github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= -github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= -github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= -github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U= -github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI= +github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc= +github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE= +github.com/securego/gosec/v2 v2.18.1 h1:xnnehWg7dIW8qrRPGm8ykY21zp2MueKyC99Vlcuj96I= +github.com/securego/gosec/v2 v2.18.1/go.mod h1:ZUTcKD9gAFip1lLGHWCjkoBQJyaEzePTNzjwlL2HHoE= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= @@ -598,7 +610,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -608,7 +619,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= @@ -621,8 +631,8 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM= +github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= @@ -635,11 +645,11 @@ github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81v github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo= -github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw= -github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= @@ -657,17 +667,19 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= -gitlab.com/bosi/decorder v0.4.0 
h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY= -gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg= -go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= +go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM= +go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.tmz.dev/musttag v0.7.1 h1:9lFmeSFnFfPuMq4IksHGomItE6NgKMNW2Nt2FPOhCfU= -go.tmz.dev/musttag v0.7.1/go.mod h1:oJLkpR56EsIryktZJk/B0IroSMi37YWver47fibGh5U= +go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s= +go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= @@ -688,8 +700,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -736,8 +748,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -780,7 +792,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -804,8 +816,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -866,8 +878,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -885,8 +897,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -958,8 +970,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1082,8 +1094,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= -honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= +honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= From 4cbb14e0bbe87ca76362064ccec8fd24dbd130e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 22 Oct 2023 21:59:13 +0000 Subject: [PATCH 22/27] build(deps): bump github.com/aiven/aiven-go-client/v2 from 2.1.0 to 2.2.0 (#1400) --- go.mod | 2 +- go.sum | 4 +-- .../sdkprovider/service/opensearch/common.go | 28 +++++++++---------- .../opensearch/opensearch_acl_config.go | 10 +++---- .../opensearch_acl_config_data_source.go | 2 +- .../opensearch/opensearch_acl_config_test.go | 2 +- .../service/opensearch/opensearch_acl_rule.go | 16 +++++------ .../opensearch_acl_rule_data_source.go | 4 +-- .../opensearch/opensearch_acl_rule_test.go | 4 +-- 9 files changed, 36 insertions(+), 36 deletions(-) diff --git a/go.mod b/go.mod index 079de931b..10714d534 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aiven/terraform-provider-aiven go 1.21.1 require ( - github.com/aiven/aiven-go-client/v2 v2.1.0 + github.com/aiven/aiven-go-client/v2 v2.2.0 github.com/avast/retry-go v3.0.0+incompatible github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 diff --git a/go.sum b/go.sum index 1f66c7661..8151e42ce 100644 --- a/go.sum +++ b/go.sum @@ -201,8 +201,8 @@ github.com/acomagu/bufpipe v1.0.4/go.mod 
h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/aiven/aiven-go-client/v2 v2.1.0 h1:n8k34HpEQ7KgxRcyX/F5WKR6xh8MSAM6TtPHnghDNGg= -github.com/aiven/aiven-go-client/v2 v2.1.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= +github.com/aiven/aiven-go-client/v2 v2.2.0 h1:ZPeIMF3Jt/wPrwFj3mO1Z8KtP9OHc6GpMtZXmwfBsE4= +github.com/aiven/aiven-go-client/v2 v2.2.0/go.mod h1:x0xhzxWEKAwKv0xY5FvECiI6tesWshcPHvjwl0B/1SU= github.com/aiven/go-api-schemas v1.38.0 h1:l+OOMYRG1tddTFQN4OihE6+sxVdP5fKeRN/9uoKFnkk= github.com/aiven/go-api-schemas v1.38.0/go.mod h1:/bPxBUHza/2Aeer6hIIdB++GxKiw9K1KCBtRa2rtZ5I= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= diff --git a/internal/sdkprovider/service/opensearch/common.go b/internal/sdkprovider/service/opensearch/common.go index f4788e841..058b5c00c 100644 --- a/internal/sdkprovider/service/opensearch/common.go +++ b/internal/sdkprovider/service/opensearch/common.go @@ -20,26 +20,26 @@ func resourceOpenSearchACLModifyRemoteConfig( project string, serviceName string, client *aiven.Client, - modifiers ...func(*aiven.ElasticSearchACLConfig), + modifiers ...func(*aiven.OpenSearchACLConfig), ) error { resourceOpenSearchACLModifierMutex.Lock() defer resourceOpenSearchACLModifierMutex.Unlock() - r, err := client.ElasticsearchACLs.Get(ctx, project, serviceName) + r, err := client.OpenSearchACLs.Get(ctx, project, serviceName) if err != nil { return err } - config := r.ElasticSearchACLConfig + config := r.OpenSearchACLConfig for i := range modifiers { modifiers[i](&config) } - _, err = client.ElasticsearchACLs.Update( + _, err = client.OpenSearchACLs.Update( ctx, project, serviceName, - aiven.ElasticsearchACLRequest{ElasticSearchACLConfig: config}) + aiven.OpenSearchACLRequest{OpenSearchACLConfig: config}) if err != nil { return err } @@ -48,35 +48,35 @@ func resourceOpenSearchACLModifyRemoteConfig( // some modifiers -func resourceElasticsearchACLModifierUpdateACLRule( +func resourceOpenSearchACLModifierUpdateACLRule( ctx context.Context, username string, index string, permission string, -) func(*aiven.ElasticSearchACLConfig) { - return func(cfg *aiven.ElasticSearchACLConfig) { +) func(*aiven.OpenSearchACLConfig) { + return func(cfg *aiven.OpenSearchACLConfig) { cfg.Add(resourceOpenSearchACLRuleMkAivenACL(username, index, permission)) // delete the old acl if it's there - if prevPerm, ok := resourceElasticsearchACLRuleGetPermissionFromACLResponse(*cfg, username, index); ok && prevPerm != permission { + if prevPerm, ok := resourceOpenSearchACLRuleGetPermissionFromACLResponse(*cfg, username, index); ok && prevPerm != permission { cfg.Delete(ctx, resourceOpenSearchACLRuleMkAivenACL(username, index, prevPerm)) } } } -func resourceElasticsearchACLModifierDeleteACLRule( +func resourceOpenSearchACLModifierDeleteACLRule( ctx context.Context, username string, index string, permission string, -) func(*aiven.ElasticSearchACLConfig) { - return func(cfg *aiven.ElasticSearchACLConfig) { +) func(*aiven.OpenSearchACLConfig) { + return func(cfg *aiven.OpenSearchACLConfig) { cfg.Delete(ctx, resourceOpenSearchACLRuleMkAivenACL(username, index, permission)) } } -func resourceElasticsearchACLModifierToggleConfigFields(enabled, extednedACL bool) func(*aiven.ElasticSearchACLConfig) { - 
return func(cfg *aiven.ElasticSearchACLConfig) { +func resourceOpenSearchACLModifierToggleConfigFields(enabled, extednedACL bool) func(*aiven.OpenSearchACLConfig) { + return func(cfg *aiven.OpenSearchACLConfig) { cfg.Enabled = enabled cfg.ExtendedAcl = extednedACL } diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_config.go b/internal/sdkprovider/service/opensearch/opensearch_acl_config.go index 304eb6fc6..a61edace1 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_config.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_config.go @@ -53,7 +53,7 @@ func resourceOpenSearchACLConfigRead(ctx context.Context, d *schema.ResourceData return diag.FromErr(err) } - r, err := client.ElasticsearchACLs.Get(ctx, project, serviceName) + r, err := client.OpenSearchACLs.Get(ctx, project, serviceName) if err != nil { return diag.FromErr(schemautil.ResourceReadHandleNotFound(err, d)) } @@ -64,10 +64,10 @@ func resourceOpenSearchACLConfigRead(ctx context.Context, d *schema.ResourceData if err := d.Set("service_name", serviceName); err != nil { return diag.Errorf("error setting ACLs `service_name` for resource %s: %s", d.Id(), err) } - if err := d.Set("extended_acl", r.ElasticSearchACLConfig.ExtendedAcl); err != nil { + if err := d.Set("extended_acl", r.OpenSearchACLConfig.ExtendedAcl); err != nil { return diag.Errorf("error setting ACLs `extended_acl` for resource %s: %s", d.Id(), err) } - if err := d.Set("enabled", r.ElasticSearchACLConfig.Enabled); err != nil { + if err := d.Set("enabled", r.OpenSearchACLConfig.Enabled); err != nil { return diag.Errorf("error setting ACLs `enable` for resource %s: %s", d.Id(), err) } return nil @@ -79,7 +79,7 @@ func resourceOpenSearchACLConfigUpdate(ctx context.Context, d *schema.ResourceDa project := d.Get("project").(string) serviceName := d.Get("service_name").(string) - modifier := resourceElasticsearchACLModifierToggleConfigFields(d.Get("enabled").(bool), d.Get("extended_acl").(bool)) + modifier := resourceOpenSearchACLModifierToggleConfigFields(d.Get("enabled").(bool), d.Get("extended_acl").(bool)) err := resourceOpenSearchACLModifyRemoteConfig(ctx, project, serviceName, client, modifier) if err != nil { return diag.FromErr(err) @@ -96,7 +96,7 @@ func resourceOpenSearchACLConfigDelete(ctx context.Context, d *schema.ResourceDa project := d.Get("project").(string) serviceName := d.Get("service_name").(string) - modifier := resourceElasticsearchACLModifierToggleConfigFields(false, false) + modifier := resourceOpenSearchACLModifierToggleConfigFields(false, false) err := resourceOpenSearchACLModifyRemoteConfig(ctx, project, serviceName, client, modifier) if err != nil { return diag.FromErr(err) diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_config_data_source.go b/internal/sdkprovider/service/opensearch/opensearch_acl_config_data_source.go index 587fb2ddf..4cc38f9aa 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_config_data_source.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_config_data_source.go @@ -24,7 +24,7 @@ func datasourceOpenSearchACLConfigRead(ctx context.Context, d *schema.ResourceDa projectName := d.Get("project").(string) serviceName := d.Get("service_name").(string) - acl, err := client.ElasticsearchACLs.Get(ctx, projectName, serviceName) + acl, err := client.OpenSearchACLs.Get(ctx, projectName, serviceName) if err != nil { return diag.FromErr(err) } diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_config_test.go 
b/internal/sdkprovider/service/opensearch/opensearch_acl_config_test.go index 0b2130773..9bd4b5981 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_config_test.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_config_test.go @@ -82,7 +82,7 @@ func testAccCheckAivenOpenSearchACLConfigResourceDestroy(s *terraform.State) err return err } - r, err := c.ElasticsearchACLs.Get(ctx, projectName, serviceName) + r, err := c.OpenSearchACLs.Get(ctx, projectName, serviceName) if err != nil { if err.(aiven.Error).Status != 404 { return err diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_rule.go b/internal/sdkprovider/service/opensearch/opensearch_acl_rule.go index 9eadc54ef..7d5f783be 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_rule.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_rule.go @@ -53,7 +53,7 @@ func ResourceOpenSearchACLRule() *schema.Resource { } } -func resourceElasticsearchACLRuleGetPermissionFromACLResponse(cfg aiven.ElasticSearchACLConfig, username, index string) (string, bool) { +func resourceOpenSearchACLRuleGetPermissionFromACLResponse(cfg aiven.OpenSearchACLConfig, username, index string) (string, bool) { for _, acl := range cfg.ACLs { if acl.Username != username { continue @@ -75,11 +75,11 @@ func resourceOpenSearchACLRuleRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - r, err := client.ElasticsearchACLs.Get(ctx, project, serviceName) + r, err := client.OpenSearchACLs.Get(ctx, project, serviceName) if err != nil { return diag.FromErr(schemautil.ResourceReadHandleNotFound(err, d)) } - permission, found := resourceElasticsearchACLRuleGetPermissionFromACLResponse(r.ElasticSearchACLConfig, username, index) + permission, found := resourceOpenSearchACLRuleGetPermissionFromACLResponse(r.OpenSearchACLConfig, username, index) if !found { return diag.FromErr(schemautil.ResourceReadHandleNotFound(err, d)) } @@ -103,10 +103,10 @@ func resourceOpenSearchACLRuleRead(ctx context.Context, d *schema.ResourceData, return nil } -func resourceOpenSearchACLRuleMkAivenACL(username, index, permission string) aiven.ElasticSearchACL { - return aiven.ElasticSearchACL{ +func resourceOpenSearchACLRuleMkAivenACL(username, index, permission string) aiven.OpenSearchACL { + return aiven.OpenSearchACL{ Username: username, - Rules: []aiven.ElasticsearchACLRule{ + Rules: []aiven.OpenSearchACLRule{ { Index: index, Permission: permission, @@ -124,7 +124,7 @@ func resourceOpenSearchACLRuleUpdate(ctx context.Context, d *schema.ResourceData index := d.Get("index").(string) permission := d.Get("permission").(string) - modifier := resourceElasticsearchACLModifierUpdateACLRule(ctx, username, index, permission) + modifier := resourceOpenSearchACLModifierUpdateACLRule(ctx, username, index, permission) err := resourceOpenSearchACLModifyRemoteConfig(ctx, project, serviceName, client, modifier) if err != nil { return diag.FromErr(err) @@ -144,7 +144,7 @@ func resourceOpenSearchACLRuleDelete(ctx context.Context, d *schema.ResourceData index := d.Get("index").(string) permission := d.Get("permission").(string) - modifier := resourceElasticsearchACLModifierDeleteACLRule(ctx, username, index, permission) + modifier := resourceOpenSearchACLModifierDeleteACLRule(ctx, username, index, permission) err := resourceOpenSearchACLModifyRemoteConfig(ctx, project, serviceName, client, modifier) if err != nil { return diag.FromErr(err) diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_rule_data_source.go 
b/internal/sdkprovider/service/opensearch/opensearch_acl_rule_data_source.go index e8f87d0cd..666ed967b 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_rule_data_source.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_rule_data_source.go @@ -26,12 +26,12 @@ func datasourceOpenSearchACLRuleRead(ctx context.Context, d *schema.ResourceData username := d.Get("username").(string) index := d.Get("index").(string) - r, err := client.ElasticsearchACLs.Get(ctx, projectName, serviceName) + r, err := client.OpenSearchACLs.Get(ctx, projectName, serviceName) if err != nil { return diag.FromErr(err) } - if _, found := resourceElasticsearchACLRuleGetPermissionFromACLResponse(r.ElasticSearchACLConfig, username, index); !found { + if _, found := resourceOpenSearchACLRuleGetPermissionFromACLResponse(r.OpenSearchACLConfig, username, index); !found { return diag.Errorf("acl rule %s/%s/%s/%s not found", projectName, serviceName, username, index) } diff --git a/internal/sdkprovider/service/opensearch/opensearch_acl_rule_test.go b/internal/sdkprovider/service/opensearch/opensearch_acl_rule_test.go index 77b6873bd..f18209aaa 100644 --- a/internal/sdkprovider/service/opensearch/opensearch_acl_rule_test.go +++ b/internal/sdkprovider/service/opensearch/opensearch_acl_rule_test.go @@ -91,7 +91,7 @@ func testAccCheckAivenOpenSearchACLRuleResourceDestroy(s *terraform.State) error return err } - r, err := c.ElasticsearchACLs.Get(ctx, projectName, serviceName) + r, err := c.OpenSearchACLs.Get(ctx, projectName, serviceName) if err != nil { if err.(aiven.Error).Status != 404 { return err @@ -101,7 +101,7 @@ func testAccCheckAivenOpenSearchACLRuleResourceDestroy(s *terraform.State) error return nil } - for _, acl := range r.ElasticSearchACLConfig.ACLs { + for _, acl := range r.OpenSearchACLConfig.ACLs { if acl.Username != username { continue } From f64aa9b60ac75f7d9569fdd67fbd4a77aa24ee62 Mon Sep 17 00:00:00 2001 From: Aleksander Zaruczewski Date: Mon, 23 Oct 2023 02:34:07 -0400 Subject: [PATCH 23/27] test(project): fix failing tests (#1404) --- .../sdkprovider/service/project/project_user_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/internal/sdkprovider/service/project/project_user_test.go b/internal/sdkprovider/service/project/project_user_test.go index d91dc8ee5..326429e24 100644 --- a/internal/sdkprovider/service/project/project_user_test.go +++ b/internal/sdkprovider/service/project/project_user_test.go @@ -83,14 +83,19 @@ func testAccCheckAivenProjectUserResourceDestroy(s *terraform.State) error { func testAccProjectUserResource(name string) string { return fmt.Sprintf(` +resource "aiven_organization" "foo" { + name = "test-acc-org-%[1]s" +} + resource "aiven_project" "foo" { - project = "test-acc-pr-%s" + project = "test-acc-pr-%[1]s" default_cloud = "aws-eu-west-2" + parent_id = aiven_organization.foo.id } resource "aiven_project_user" "bar" { project = aiven_project.foo.project - email = "ivan.savciuc+%s@aiven.fi" + email = "ivan.savciuc+%[1]s@aiven.fi" member_type = "admin" } @@ -99,7 +104,7 @@ data "aiven_project_user" "user" { email = aiven_project_user.bar.email depends_on = [aiven_project_user.bar] -}`, name, name) +}`, name) } func testAccProjectUserDeveloperResource(name string) string { From 70ba3fdaf3cda0759c57f543af40f61380f1dac2 Mon Sep 17 00:00:00 2001 From: Aleksander Zaruczewski Date: Mon, 23 Oct 2023 03:14:10 -0400 Subject: [PATCH 24/27] test(account): fix failing team tests (#1405) --- .github/workflows/acceptance-tests.yml 
| 5 ++++- .../account/account_authentication_test.go | 21 ++++++++++++------- .../account/account_team_data_source_test.go | 5 +++++ .../account_team_member_data_source_test.go | 5 +++++ .../account/account_team_member_test.go | 19 +++++++++++------ .../account/account_team_project_test.go | 21 ++++++++++++------- .../service/account/account_team_test.go | 15 +++++++++---- 7 files changed, 66 insertions(+), 25 deletions(-) diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index 52d659600..c1ce5db57 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -69,6 +69,8 @@ jobs: env: AIVEN_TOKEN: ${{ secrets.AIVEN_TOKEN }} AIVEN_PROJECT_NAME: ${{ secrets.AIVEN_PROJECT_NAME }} + AIVEN_ORGANIZATION_NAME: ${{ secrets.AIVEN_ORGANIZATION_NAME }} + AIVEN_ACCOUNT_NAME: ${{ secrets.AIVEN_ORGANIZATION_NAME }} PKG: ${{matrix.pkg}} sweep: @@ -106,4 +108,5 @@ jobs: env: AIVEN_TOKEN: ${{ secrets.AIVEN_TOKEN }} AIVEN_PROJECT_NAME: ${{ secrets.AIVEN_PROJECT_NAME }} - + AIVEN_ORGANIZATION_NAME: ${{ secrets.AIVEN_ORGANIZATION_NAME }} + AIVEN_ACCOUNT_NAME: ${{ secrets.AIVEN_ORGANIZATION_NAME }} diff --git a/internal/sdkprovider/service/account/account_authentication_test.go b/internal/sdkprovider/service/account/account_authentication_test.go index 4c3cb85ef..87ce69ba5 100644 --- a/internal/sdkprovider/service/account/account_authentication_test.go +++ b/internal/sdkprovider/service/account/account_authentication_test.go @@ -11,6 +11,7 @@ import ( "fmt" "log" "math/big" + "os" "strings" "testing" "time" @@ -133,6 +134,10 @@ func TestAccAivenAccountAuthentication_saml_invalid_certificate(t *testing.T) { } func TestAccAivenAccountAuthentication_auto_join_team_id(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + resourceName := "aiven_account_authentication.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -177,19 +182,21 @@ data "aiven_account_authentication" "auth" { } func testAccAccountAuthenticationWithAutoJoinTeamIDResource(name string) string { + orgName := os.Getenv("AIVEN_ACCOUNT_NAME") + return fmt.Sprintf(` -resource "aiven_account" "foo" { - name = "test-acc-ac-%s" +data "aiven_account" "foo" { + name = "%[1]s" } resource "aiven_account_team" "foo" { - account_id = aiven_account.foo.account_id - name = "test-acc-team-%s" + account_id = data.aiven_account.foo.account_id + name = "test-acc-team-%[2]s" } resource "aiven_account_authentication" "foo" { - account_id = aiven_account.foo.account_id - name = "test-acc-auth-%s" + account_id = data.aiven_account.foo.account_id + name = "test-acc-auth-%[2]s" type = "saml" enabled = false auto_join_team_id = aiven_account_team.foo.team_id @@ -207,7 +214,7 @@ data "aiven_account_authentication" "auth" { name = aiven_account_authentication.foo.name depends_on = [aiven_account_authentication.foo] -}`, name, name, name) +}`, orgName, name) } func testAccCheckAivenAccountAuthenticationResourceDestroy(s *terraform.State) error { diff --git a/internal/sdkprovider/service/account/account_team_data_source_test.go b/internal/sdkprovider/service/account/account_team_data_source_test.go index dc05b9cc5..7bff91824 100644 --- a/internal/sdkprovider/service/account/account_team_data_source_test.go +++ b/internal/sdkprovider/service/account/account_team_data_source_test.go @@ -1,6 +1,7 @@ package account_test import ( + "os" "testing" 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -10,6 +11,10 @@ import ( ) func TestAccAivenAccountTeamDataSource_basic(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + datasourceName := "data.aiven_account_team.team" resourceName := "aiven_account_team.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) diff --git a/internal/sdkprovider/service/account/account_team_member_data_source_test.go b/internal/sdkprovider/service/account/account_team_member_data_source_test.go index c893902d5..b87ebe4e7 100644 --- a/internal/sdkprovider/service/account/account_team_member_data_source_test.go +++ b/internal/sdkprovider/service/account/account_team_member_data_source_test.go @@ -1,6 +1,7 @@ package account_test import ( + "os" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -10,6 +11,10 @@ import ( ) func TestAccAivenAccountTeamMemberDataSource_basic(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + datasourceName := "data.aiven_account_team_member.member" resourceName := "aiven_account_team_member.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) diff --git a/internal/sdkprovider/service/account/account_team_member_test.go b/internal/sdkprovider/service/account/account_team_member_test.go index c7a270a66..d9c820da7 100644 --- a/internal/sdkprovider/service/account/account_team_member_test.go +++ b/internal/sdkprovider/service/account/account_team_member_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "os" "testing" "github.com/aiven/aiven-go-client/v2" @@ -16,6 +17,10 @@ import ( ) func TestAccAivenAccountTeamMember_basic(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + resourceName := "aiven_account_team_member.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -37,20 +42,22 @@ func TestAccAivenAccountTeamMember_basic(t *testing.T) { } func testAccAccountTeamMemberResource(name string) string { + orgName := os.Getenv("AIVEN_ACCOUNT_NAME") + return fmt.Sprintf(` -resource "aiven_account" "foo" { - name = "test-acc-ac-%s" +data "aiven_account" "foo" { + name = "%[1]s" } resource "aiven_account_team" "foo" { - account_id = aiven_account.foo.account_id - name = "test-acc-team-%s" + account_id = data.aiven_account.foo.account_id + name = "test-acc-team-%[2]s" } resource "aiven_account_team_member" "foo" { team_id = aiven_account_team.foo.team_id account_id = aiven_account_team.foo.account_id - user_email = "ivan.savciuc+%s@aiven.fi" + user_email = "ivan.savciuc+%[2]s@aiven.fi" } data "aiven_account_team_member" "member" { @@ -59,7 +66,7 @@ data "aiven_account_team_member" "member" { user_email = aiven_account_team_member.foo.user_email depends_on = [aiven_account_team_member.foo] -}`, name, name, name) +}`, orgName, name) } func testAccCheckAivenAccountTeamMemberResourceDestroy(s *terraform.State) error { diff --git a/internal/sdkprovider/service/account/account_team_project_test.go b/internal/sdkprovider/service/account/account_team_project_test.go index 49c3f8fa3..f0ef22f27 100644 --- a/internal/sdkprovider/service/account/account_team_project_test.go +++ b/internal/sdkprovider/service/account/account_team_project_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "os" 
"testing" "github.com/aiven/aiven-go-client/v2" @@ -16,6 +17,10 @@ import ( ) func TestAccAivenAccountTeamProject_basic(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + resourceName := "aiven_account_team_project.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -37,23 +42,25 @@ func TestAccAivenAccountTeamProject_basic(t *testing.T) { } func testAccAccountTeamProjectResource(name string) string { + orgName := os.Getenv("AIVEN_ACCOUNT_NAME") + return fmt.Sprintf(` -resource "aiven_account" "foo" { - name = "test-acc-ac-%s" +data "aiven_account" "foo" { + name = "%[1]s" } resource "aiven_account_team" "foo" { - account_id = aiven_account.foo.account_id - name = "test-acc-team-%s" + account_id = data.aiven_account.foo.account_id + name = "test-acc-team-%[2]s" } resource "aiven_project" "foo" { - project = "test-acc-pr-%s" + project = "test-acc-pr-%[2]s" account_id = aiven_account_team.foo.account_id } resource "aiven_account_team_project" "foo" { - account_id = aiven_account.foo.account_id + account_id = data.aiven_account.foo.account_id team_id = aiven_account_team.foo.team_id project_name = aiven_project.foo.project team_type = "admin" @@ -65,7 +72,7 @@ data "aiven_account_team_project" "project" { project_name = aiven_account_team_project.foo.project_name depends_on = [aiven_account_team_project.foo] -}`, name, name, name) +}`, orgName, name) } func testAccCheckAivenAccountTeamProjectResourceDestroy(s *terraform.State) error { diff --git a/internal/sdkprovider/service/account/account_team_test.go b/internal/sdkprovider/service/account/account_team_test.go index 24c9031e0..0e7ae8f54 100644 --- a/internal/sdkprovider/service/account/account_team_test.go +++ b/internal/sdkprovider/service/account/account_team_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "os" "testing" "github.com/aiven/aiven-go-client/v2" @@ -16,6 +17,10 @@ import ( ) func TestAccAivenAccountTeam_basic(t *testing.T) { + if _, ok := os.LookupEnv("AIVEN_ACCOUNT_NAME"); !ok { + t.Skip("AIVEN_ACCOUNT_NAME env variable is required to run this test") + } + resourceName := "aiven_account_team.foo" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -36,13 +41,15 @@ func TestAccAivenAccountTeam_basic(t *testing.T) { } func testAccAccountTeamResource(name string) string { + orgName := os.Getenv("AIVEN_ACCOUNT_NAME") + return fmt.Sprintf(` -resource "aiven_account" "foo" { - name = "test-acc-ac-%s" +data "aiven_account" "foo" { + name = "%s" } resource "aiven_account_team" "foo" { - account_id = aiven_account.foo.account_id + account_id = data.aiven_account.foo.account_id name = "test-acc-team-%s" } @@ -51,7 +58,7 @@ data "aiven_account_team" "team" { account_id = aiven_account_team.foo.account_id depends_on = [aiven_account_team.foo] -}`, name, name) +}`, orgName, name) } func testAccCheckAivenAccountTeamResourceDestroy(s *terraform.State) error { From 73a05e63ce16097bfb69a024e4239316601a544a Mon Sep 17 00:00:00 2001 From: Aleksander Zaruczewski Date: Mon, 23 Oct 2023 08:19:28 -0400 Subject: [PATCH 25/27] test(project): fix missing organization link (#1407) --- .../sdkprovider/service/project/project_user_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/internal/sdkprovider/service/project/project_user_test.go b/internal/sdkprovider/service/project/project_user_test.go index 326429e24..de5d70d97 100644 --- 
a/internal/sdkprovider/service/project/project_user_test.go
+++ b/internal/sdkprovider/service/project/project_user_test.go
@@ -109,14 +109,19 @@ data "aiven_project_user" "user" {
 func testAccProjectUserDeveloperResource(name string) string {
 	return fmt.Sprintf(`
+resource "aiven_organization" "foo" {
+  name = "test-acc-org-%[1]s"
+}
+
 resource "aiven_project" "foo" {
-  project       = "test-acc-pr-%s"
+  project       = "test-acc-pr-%[1]s"
   default_cloud = "aws-eu-west-2"
+  parent_id     = aiven_organization.foo.id
 }
 
 resource "aiven_project_user" "bar" {
   project     = aiven_project.foo.project
-  email       = "ivan.savciuc+%s@aiven.fi"
+  email       = "ivan.savciuc+%[1]s@aiven.fi"
   member_type = "developer"
 }
 
@@ -125,7 +130,7 @@ data "aiven_project_user" "user" {
   email      = aiven_project_user.bar.email
 
   depends_on = [aiven_project_user.bar]
-}`, name, name)
+}`, name)
 }
 
 func testAccCheckAivenProjectUserAttributes(n string) resource.TestCheckFunc {

From 34864ff5640edc9299178a36598e78992c9a2d74 Mon Sep 17 00:00:00 2001
From: Aleksander Zaruczewski
Date: Mon, 23 Oct 2023 12:50:14 -0400
Subject: [PATCH 26/27] fix(mysql): use correct read context fn (#1408)

---
 CHANGELOG.md                                     | 3 ++-
 internal/sdkprovider/service/mysql/mysql_user.go | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95fc8b215..c9bf9cf9a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,10 +6,11 @@ nav_order: 1
 
 # Changelog
 
-- Deprecating `project_user`, `account_team` and `account_team_member resources`
 ## [5.0.0] - YYYY-MM-DD
 
 - Migrate `aiven_service_integration` to the Plugin Framework
+- Deprecate `project_user`, `account_team` and `account_team_member` resources
+- Fix incorrect read context in MySQL user resource
 
 ## [4.9.1] - 2023-10-03
 
diff --git a/internal/sdkprovider/service/mysql/mysql_user.go b/internal/sdkprovider/service/mysql/mysql_user.go
index bab8ab3ae..c95334065 100644
--- a/internal/sdkprovider/service/mysql/mysql_user.go
+++ b/internal/sdkprovider/service/mysql/mysql_user.go
@@ -64,7 +64,7 @@ func ResourceMySQLUser() *schema.Resource {
 		Description:   "The MySQL User resource allows the creation and management of Aiven MySQL Users.",
 		CreateContext: resourceMySQLUserCreate,
 		UpdateContext: resourceMySQLUserUpdate,
-		ReadContext:   schemautil.DatasourceServiceUserRead,
+		ReadContext:   schemautil.ResourceServiceUserRead,
 		DeleteContext: schemautil.ResourceServiceUserDelete,
 		Importer: &schema.ResourceImporter{
 			StateContext: schema.ImportStatePassthroughContext,

From eaa7300d6119206e84cd63c5dcc7dcf8b6d293b6 Mon Sep 17 00:00:00 2001
From: Aleksander Zaruczewski
Date: Tue, 24 Oct 2023 02:22:16 -0400
Subject: [PATCH 27/27] test(flink): fix application version flakiness (#1406)

---
 .../flink/flink_application_version.go        | 44 ++++++++++++++++---
 1 file changed, 37 insertions(+), 7 deletions(-)

diff --git a/internal/sdkprovider/service/flink/flink_application_version.go b/internal/sdkprovider/service/flink/flink_application_version.go
index 9a6405697..25c9e10ca 100644
--- a/internal/sdkprovider/service/flink/flink_application_version.go
+++ b/internal/sdkprovider/service/flink/flink_application_version.go
@@ -2,9 +2,12 @@ package flink
 
 import (
 	"context"
+	"regexp"
+	"time"
 
 	"github.com/aiven/aiven-go-client/v2"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 
 	"github.com/aiven/terraform-provider-aiven/internal/schemautil"
@@ -186,13 +189,40 @@ func resourceFlinkApplicationVersionCreate(ctx
context.Context, d *schema.Resour sinks = expandFlinkApplicationVersionSourcesOrSinks(d.Get("sink").(*schema.Set).List()) } - r, err := client.FlinkApplicationVersions.Create(ctx, project, serviceName, applicationID, aiven.GenericFlinkApplicationVersionRequest{ - Statement: d.Get("statement").(string), - Sources: sources, - Sinks: sinks, - }) - if err != nil { - return diag.Errorf("cannot create Flink Application Version: %+v - %v", expandFlinkApplicationVersionSourcesOrSinks(d.Get("sources").(*schema.Set).List()), err) + var r *aiven.DetailedFlinkApplicationVersionResponse + + if err := retry.RetryContext(ctx, time.Second*30, func() *retry.RetryError { + var err error + + r, err = client.FlinkApplicationVersions.Create( + ctx, + project, + serviceName, + applicationID, + aiven.GenericFlinkApplicationVersionRequest{ + Statement: d.Get("statement").(string), + Sources: sources, + Sinks: sinks, + }, + ) + if err != nil { + return &retry.RetryError{ + Err: err, + Retryable: regexp.MustCompile( + "Integration not found: " + + "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}", + ).MatchString(err.Error()), + } + } + return nil + }); err != nil { + return diag.Errorf( + "cannot create Flink Application Version: %+v - %v", + expandFlinkApplicationVersionSourcesOrSinks( + d.Get("sources").(*schema.Set).List(), + ), + err, + ) } d.SetId(schemautil.BuildResourceID(project, serviceName, applicationID, r.ID))
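
Note on the fix above: it follows a generic eventual-consistency pattern — wrap the create call in retry.RetryContext and mark the error retryable only while it matches the transient "Integration not found: <uuid>" message, so unrelated API failures still surface immediately instead of burning the full 30-second budget. Below is a minimal, self-contained sketch of that pattern; the helper names (createWithRetry, integrationNotFoundRe) are illustrative only and do not appear in the patch.

package main

import (
	"context"
	"errors"
	"fmt"
	"regexp"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// integrationNotFoundRe matches the transient error returned while a freshly
// created service integration has not yet propagated to the Flink service.
var integrationNotFoundRe = regexp.MustCompile(
	"Integration not found: " +
		"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}",
)

// createWithRetry keeps calling fn for up to 30 seconds, but only while the
// returned error matches the eventual-consistency pattern above; any other
// error aborts the retry loop immediately.
func createWithRetry(ctx context.Context, fn func() error) error {
	return retry.RetryContext(ctx, 30*time.Second, func() *retry.RetryError {
		if err := fn(); err != nil {
			return &retry.RetryError{
				Err:       err,
				Retryable: integrationNotFoundRe.MatchString(err.Error()),
			}
		}
		return nil
	})
}

func main() {
	attempts := 0
	err := createWithRetry(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			// Simulated transient error; the call succeeds on the third attempt.
			return errors.New("Integration not found: 123e4567-e89b-12d3-a456-426614174000")
		}
		return nil
	})
	fmt.Println(attempts, err) // prints: 3 <nil>
}

Hoisting the compiled regexp out of the closure, as in this sketch, also avoids recompiling it on every retry attempt; the inline MustCompile in the patch behaves identically, just with a little extra work per attempt.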