From ccaab1889e8200609cb54ce67c62bb0a1356a910 Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Fri, 6 Oct 2023 14:31:56 +0300 Subject: [PATCH] feat(plugin): use set instead of list (#1382) --- docs/data-sources/service_integration.md | 58 +-- docs/resources/service_integration.md | 58 +-- .../service/serviceintegration/models.go | 36 +- .../service/serviceintegration/userconfig.go | 2 +- .../clickhousekafka/clickhouse_kafka.go | 72 ++-- .../clickhouse_postgresql.go | 40 +- .../userconfig/integration/datadog/datadog.go | 144 +++---- .../external_aws_cloudwatch_metrics.go | 56 +-- .../integration/kafkaconnect/kafka_connect.go | 36 +- .../integration/kafkalogs/kafka_logs.go | 40 +- .../kafkamirrormaker/kafka_mirrormaker.go | 36 +- .../userconfig/integration/logs/logs.go | 40 +- .../userconfig/integration/metrics/metrics.go | 48 +-- internal/schemautil/plugin.go | 22 +- .../serviceintegration/service_integration.go | 398 ------------------ .../service_integration_data_source.go | 51 --- ucgenerator/main.go | 42 +- 17 files changed, 365 insertions(+), 814 deletions(-) delete mode 100644 internal/sdkprovider/service/serviceintegration/service_integration.go delete mode 100644 internal/sdkprovider/service/serviceintegration/service_integration_data_source.go diff --git a/docs/data-sources/service_integration.md b/docs/data-sources/service_integration.md index f3f35d1e6..c5a1bf004 100644 --- a/docs/data-sources/service_integration.md +++ b/docs/data-sources/service_integration.md @@ -30,18 +30,18 @@ data "aiven_service_integration" "myintegration" { ### Read-Only -- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) - `id` (String) The ID of this resource. 
- `integration_id` (String) Service Integration Id at aiven -- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) +- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) @@ -49,7 +49,7 @@ data "aiven_service_integration" "myintegration" { Read-Only: -- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -57,7 +57,7 @@ Read-Only: Read-Only: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `data_format` (String) Message data format. The default value is `JSONEachRow`. - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `group_name` (String) Kafka consumers group. The default value is `clickhouse`. @@ -68,7 +68,7 @@ Read-Only: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. 
-- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -94,7 +94,7 @@ Read-Only: Read-Only: -- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -112,15 +112,15 @@ Read-Only: Read-Only: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) List of custom metrics. -- `exclude_topics` (List of String) List of topics to exclude. -- `include_consumer_groups` (List of String) List of custom metrics. -- `include_topics` (List of String) List of topics to include. -- `kafka_custom_metrics` (List of String) List of custom metrics. +- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (Set of String) List of custom metrics. +- `exclude_topics` (Set of String) List of topics to exclude. +- `include_consumer_groups` (Set of String) List of custom metrics. +- `include_topics` (Set of String) List of topics to include. +- `kafka_custom_metrics` (Set of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. 
-- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -155,8 +155,8 @@ Read-Only: Read-Only: -- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -182,7 +182,7 @@ Read-Only: Read-Only: -- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -202,7 +202,7 @@ Read-Only: Read-Only: - `kafka_topic` (String) Topic name. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -211,7 +211,7 @@ Read-Only: Read-Only: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -234,7 +234,7 @@ Read-Only: - `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. - `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. 
+- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -245,7 +245,7 @@ Read-Only: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -253,7 +253,7 @@ Read-Only: Read-Only: -- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index 9218f0576..b82e11a9d 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -33,17 +33,17 @@ resource "aiven_service_integration" "my_integration_metrics" { ### Optional -- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) - `destination_service_name` (String) Destination service for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) -- `kafka_connect_user_config` 
(Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - `source_service_name` (String) Source service for the integration (if any) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) @@ -58,7 +58,7 @@ resource "aiven_service_integration" "my_integration_metrics" { Optional: -- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -72,7 +72,7 @@ Required: Optional: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`. @@ -80,7 +80,7 @@ Optional: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. 
-- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -106,7 +106,7 @@ Required: Optional: -- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -124,15 +124,15 @@ Optional: Optional: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) List of custom metrics. -- `exclude_topics` (List of String) List of topics to exclude. -- `include_consumer_groups` (List of String) List of custom metrics. -- `include_topics` (List of String) List of topics to include. -- `kafka_custom_metrics` (List of String) List of custom metrics. +- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (Set of String) List of custom metrics. +- `exclude_topics` (Set of String) List of topics to exclude. +- `include_consumer_groups` (Set of String) List of custom metrics. +- `include_topics` (Set of String) List of topics to include. +- `kafka_custom_metrics` (Set of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. 
-- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -170,8 +170,8 @@ Optional: Optional: -- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -197,7 +197,7 @@ Required: Optional: -- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -220,7 +220,7 @@ Required: Optional: -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. @@ -229,7 +229,7 @@ Optional: Optional: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -252,7 +252,7 @@ Optional: - `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. - `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. -- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. +- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent. @@ -263,7 +263,7 @@ Optional: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -271,7 +271,7 @@ Optional: Optional: -- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` diff --git a/internal/plugin/service/serviceintegration/models.go b/internal/plugin/service/serviceintegration/models.go index c499147c6..f2a752a8e 100644 --- a/internal/plugin/service/serviceintegration/models.go +++ b/internal/plugin/service/serviceintegration/models.go @@ -26,15 +26,15 @@ type resourceModel struct { IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` - ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` - ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` - DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` - ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` - KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` - KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` - KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` - LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` - MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` + ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + 
ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` } type dataSourceModel struct { @@ -46,15 +46,15 @@ type dataSourceModel struct { IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` - ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` - ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` - DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` - ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` - KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` - KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` - KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` - LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` - MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` + ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` } func (p *resourceModel) getID() string { diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go index 901eaf406..d398f18ba 100644 --- a/internal/plugin/service/serviceintegration/userconfig.go +++ b/internal/plugin/service/serviceintegration/userconfig.go @@ -131,6 +131,6 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceM } } -func isSet(o 
types.List) bool { +func isSet(o types.Set) bool { return !(o.IsUnknown() || o.IsNull()) } diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go index 02b903200..b22a9ef20 100644 --- a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go @@ -5,7 +5,7 @@ package clickhousekafka import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -19,10 +19,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.SetNestedBlock{ Description: "Tables to create", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ @@ -88,7 +88,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, Blocks: map[string]resource.Block{ - "columns": resource.ListNestedBlock{ + "columns": resource.SetNestedBlock{ Description: "Table columns", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "name": resource.StringAttribute{ @@ -100,29 +100,29 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, - "topics": resource.ListNestedBlock{ + "topics": resource.SetNestedBlock{ Description: "Kafka topics", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"name": resource.StringAttribute{ Description: "Name of the topic.", Required: true, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.SetNestedBlock{ Description: "Tables to create", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ @@ -172,7 +172,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, Blocks: map[string]datasource.Block{ - "columns": 
datasource.ListNestedBlock{ + "columns": datasource.SetNestedBlock{ Description: "Table columns", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "name": datasource.StringAttribute{ @@ -184,27 +184,27 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Column type.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, - "topics": datasource.ListNestedBlock{ + "topics": datasource.SetNestedBlock{ Description: "Kafka topics", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"name": datasource.StringAttribute{ Computed: true, Description: "Name of the topic.", }}}, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(100)}, + Validators: []validator.Set{setvalidator.SizeAtMost(100)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - Tables types.List `tfsdk:"tables"` + Tables types.Set `tfsdk:"tables"` } // dtoUserConfig request/response object @@ -214,7 +214,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - tablesVar := schemautil.ExpandListNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) + tablesVar := schemautil.ExpandSetNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) if diags.HasError() { return nil } @@ -223,19 +223,19 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - tablesVar := schemautil.FlattenListNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) + tablesVar := schemautil.FlattenSetNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) if diags.HasError() { return nil } return &tfoUserConfig{Tables: tablesVar} } -var userConfigAttrs = map[string]attr.Type{"tables": types.ListType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} +var userConfigAttrs = map[string]attr.Type{"tables": types.SetType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} // tfoTables Table to create type tfoTables struct { AutoOffsetReset types.String `tfsdk:"auto_offset_reset"` - Columns types.List `tfsdk:"columns"` + Columns types.Set `tfsdk:"columns"` DataFormat types.String `tfsdk:"data_format"` DateTimeInputFormat types.String `tfsdk:"date_time_input_format"` GroupName types.String `tfsdk:"group_name"` @@ -246,7 +246,7 @@ type tfoTables struct { NumConsumers types.Int64 `tfsdk:"num_consumers"` PollMaxBatchSize types.Int64 `tfsdk:"poll_max_batch_size"` SkipBrokenMessages types.Int64 `tfsdk:"skip_broken_messages"` - Topics types.List `tfsdk:"topics"` + Topics types.Set `tfsdk:"topics"` } // dtoTables request/response object @@ -268,11 +268,11 @@ type dtoTables struct { // expandTables expands tf object into dto object func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *dtoTables { - columnsVar := schemautil.ExpandListNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) + columnsVar := 
schemautil.ExpandSetNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) if diags.HasError() { return nil } - topicsVar := schemautil.ExpandListNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) + topicsVar := schemautil.ExpandSetNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) if diags.HasError() { return nil } @@ -295,11 +295,11 @@ func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *d // flattenTables flattens dto object into tf object func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) *tfoTables { - columnsVar := schemautil.FlattenListNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) + columnsVar := schemautil.FlattenSetNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) if diags.HasError() { return nil } - topicsVar := schemautil.FlattenListNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) + topicsVar := schemautil.FlattenSetNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) if diags.HasError() { return nil } @@ -322,7 +322,7 @@ func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) * var tablesAttrs = map[string]attr.Type{ "auto_offset_reset": types.StringType, - "columns": types.ListType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, + "columns": types.SetType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, "data_format": types.StringType, "date_time_input_format": types.StringType, "group_name": types.StringType, @@ -333,7 +333,7 @@ var tablesAttrs = map[string]attr.Type{ "num_consumers": types.Int64Type, "poll_max_batch_size": types.Int64Type, "skip_broken_messages": types.Int64Type, - "topics": types.ListType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, + "topics": types.SetType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, } // tfoColumns Table column @@ -392,17 +392,17 @@ func flattenTopics(ctx context.Context, diags *diag.Diagnostics, o *dtoTopics) * var topicsAttrs = map[string]attr.Type{"name": types.StringType} // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go 
b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go index a0cbd20b8..e67ab4a68 100644 --- a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go @@ -5,7 +5,7 @@ package clickhousepostgresql import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -18,10 +18,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.SetNestedBlock{ Description: "Databases to expose", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "database": resource.StringAttribute{ @@ -37,17 +37,17 @@ func NewResourceSchema() resource.ListNestedBlock { Optional: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(10)}, + Validators: []validator.Set{setvalidator.SizeAtMost(10)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.SetNestedBlock{ Description: "Databases to expose", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "database": datasource.StringAttribute{ @@ -59,15 +59,15 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "PostgreSQL schema to expose. 
The default value is `public`.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(10)}, + Validators: []validator.Set{setvalidator.SizeAtMost(10)}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - Databases types.List `tfsdk:"databases"` + Databases types.Set `tfsdk:"databases"` } // dtoUserConfig request/response object @@ -77,7 +77,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - databasesVar := schemautil.ExpandListNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) + databasesVar := schemautil.ExpandSetNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) if diags.HasError() { return nil } @@ -86,14 +86,14 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - databasesVar := schemautil.FlattenListNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) + databasesVar := schemautil.FlattenSetNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) if diags.HasError() { return nil } return &tfoUserConfig{Databases: databasesVar} } -var userConfigAttrs = map[string]attr.Type{"databases": types.ListType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} +var userConfigAttrs = map[string]attr.Type{"databases": types.SetType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} // tfoDatabases Database to expose type tfoDatabases struct { @@ -129,17 +129,17 @@ var databasesAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog.go b/internal/plugin/service/userconfig/integration/datadog/datadog.go index 8a144b214..0cde9d5f6 100644 --- a/internal/plugin/service/userconfig/integration/datadog/datadog.go +++ b/internal/plugin/service/userconfig/integration/datadog/datadog.go @@ -5,7 +5,7 @@ package datadog import ( 
"context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -18,8 +18,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ "datadog_dbm_enabled": resource.BoolAttribute{ @@ -27,40 +27,40 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "Enable Datadog Database Monitoring.", Optional: true, }, - "exclude_consumer_groups": resource.ListAttribute{ + "exclude_consumer_groups": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "exclude_topics": resource.ListAttribute{ + "exclude_topics": resource.SetAttribute{ Computed: true, Description: "List of topics to exclude.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_consumer_groups": resource.ListAttribute{ + "include_consumer_groups": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_topics": resource.ListAttribute{ + "include_topics": resource.SetAttribute{ Computed: true, Description: "List of topics to include.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "kafka_custom_metrics": resource.ListAttribute{ + "kafka_custom_metrics": resource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, "max_jmx_metrics": resource.Int64Attribute{ Computed: true, @@ -69,7 +69,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, Blocks: map[string]resource.Block{ - "datadog_tags": resource.ListNestedBlock{ + "datadog_tags": resource.SetNestedBlock{ Description: "Custom tags provided by user", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "comment": resource.StringAttribute{ @@ -82,9 +82,9 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(32)}, + Validators: []validator.Set{setvalidator.SizeAtMost(32)}, }, - "opensearch": resource.ListNestedBlock{ + "opensearch": resource.SetNestedBlock{ Description: "Datadog Opensearch Options", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "index_stats_enabled": resource.BoolAttribute{ @@ -104,7 +104,7 @@ func NewResourceSchema() resource.ListNestedBlock { }, }}, }, - "redis": 
resource.ListNestedBlock{ + "redis": resource.SetNestedBlock{ Description: "Datadog Redis Options", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"command_stats_enabled": resource.BoolAttribute{ Computed: true, @@ -115,48 +115,48 @@ func NewResourceSchema() resource.ListNestedBlock { }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ "datadog_dbm_enabled": datasource.BoolAttribute{ Computed: true, Description: "Enable Datadog Database Monitoring.", }, - "exclude_consumer_groups": datasource.ListAttribute{ + "exclude_consumer_groups": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "exclude_topics": datasource.ListAttribute{ + "exclude_topics": datasource.SetAttribute{ Computed: true, Description: "List of topics to exclude.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_consumer_groups": datasource.ListAttribute{ + "include_consumer_groups": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "include_topics": datasource.ListAttribute{ + "include_topics": datasource.SetAttribute{ Computed: true, Description: "List of topics to include.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "kafka_custom_metrics": datasource.ListAttribute{ + "kafka_custom_metrics": datasource.SetAttribute{ Computed: true, Description: "List of custom metrics.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, "max_jmx_metrics": datasource.Int64Attribute{ Computed: true, @@ -164,7 +164,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, Blocks: map[string]datasource.Block{ - "datadog_tags": datasource.ListNestedBlock{ + "datadog_tags": datasource.SetNestedBlock{ Description: "Custom tags provided by user", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "comment": datasource.StringAttribute{ @@ -176,9 +176,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(32)}, + Validators: []validator.Set{setvalidator.SizeAtMost(32)}, }, - "opensearch": datasource.ListNestedBlock{ + "opensearch": datasource.SetNestedBlock{ Description: "Datadog Opensearch Options", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "index_stats_enabled": datasource.BoolAttribute{ @@ -195,7 +195,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }}, }, - "redis": datasource.ListNestedBlock{ + "redis": datasource.SetNestedBlock{ Description: "Datadog Redis Options", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"command_stats_enabled": datasource.BoolAttribute{ Computed: true, @@ -204,22 +204,22 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig type tfoUserConfig struct { DatadogDbmEnabled types.Bool `tfsdk:"datadog_dbm_enabled"` - DatadogTags types.List `tfsdk:"datadog_tags"` - ExcludeConsumerGroups types.List `tfsdk:"exclude_consumer_groups"` - ExcludeTopics types.List `tfsdk:"exclude_topics"` - IncludeConsumerGroups types.List `tfsdk:"include_consumer_groups"` - IncludeTopics types.List `tfsdk:"include_topics"` - KafkaCustomMetrics types.List `tfsdk:"kafka_custom_metrics"` + DatadogTags types.Set `tfsdk:"datadog_tags"` + ExcludeConsumerGroups types.Set `tfsdk:"exclude_consumer_groups"` + ExcludeTopics types.Set `tfsdk:"exclude_topics"` + IncludeConsumerGroups types.Set `tfsdk:"include_consumer_groups"` + IncludeTopics types.Set `tfsdk:"include_topics"` + KafkaCustomMetrics types.Set `tfsdk:"kafka_custom_metrics"` MaxJmxMetrics types.Int64 `tfsdk:"max_jmx_metrics"` - Opensearch types.List `tfsdk:"opensearch"` - Redis types.List `tfsdk:"redis"` + Opensearch types.Set `tfsdk:"opensearch"` + Redis types.Set `tfsdk:"redis"` } // dtoUserConfig request/response object @@ -238,35 +238,35 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - datadogTagsVar := schemautil.ExpandListNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) + datadogTagsVar := schemautil.ExpandSetNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) if diags.HasError() { return nil } - excludeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeConsumerGroups) + excludeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeConsumerGroups) if diags.HasError() { return nil } - excludeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeTopics) + excludeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeTopics) if diags.HasError() { return nil } - includeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeConsumerGroups) + includeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeConsumerGroups) if diags.HasError() { return nil } - includeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeTopics) + includeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeTopics) if diags.HasError() { return nil } - kafkaCustomMetricsVar := schemautil.ExpandList[string](ctx, diags, o.KafkaCustomMetrics) + kafkaCustomMetricsVar := 
schemautil.ExpandSet[string](ctx, diags, o.KafkaCustomMetrics) if diags.HasError() { return nil } - opensearchVar := schemautil.ExpandListBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) + opensearchVar := schemautil.ExpandSetBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) if diags.HasError() { return nil } - redisVar := schemautil.ExpandListBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) + redisVar := schemautil.ExpandSetBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) if diags.HasError() { return nil } @@ -286,40 +286,40 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - datadogTagsVar := schemautil.FlattenListNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) + datadogTagsVar := schemautil.FlattenSetNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) if diags.HasError() { return nil } - excludeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) + excludeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) diags.Append(d...) if diags.HasError() { return nil } - excludeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeTopics) + excludeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeTopics) diags.Append(d...) if diags.HasError() { return nil } - includeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) + includeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) diags.Append(d...) if diags.HasError() { return nil } - includeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeTopics) + includeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeTopics) diags.Append(d...) if diags.HasError() { return nil } - kafkaCustomMetricsVar, d := types.ListValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) + kafkaCustomMetricsVar, d := types.SetValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) diags.Append(d...) 
if diags.HasError() { return nil } - opensearchVar := schemautil.FlattenListBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) + opensearchVar := schemautil.FlattenSetBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) if diags.HasError() { return nil } - redisVar := schemautil.FlattenListBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) + redisVar := schemautil.FlattenSetBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) if diags.HasError() { return nil } @@ -339,15 +339,15 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "datadog_dbm_enabled": types.BoolType, - "datadog_tags": types.ListType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, - "exclude_consumer_groups": types.ListType{ElemType: types.StringType}, - "exclude_topics": types.ListType{ElemType: types.StringType}, - "include_consumer_groups": types.ListType{ElemType: types.StringType}, - "include_topics": types.ListType{ElemType: types.StringType}, - "kafka_custom_metrics": types.ListType{ElemType: types.StringType}, + "datadog_tags": types.SetType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, + "exclude_consumer_groups": types.SetType{ElemType: types.StringType}, + "exclude_topics": types.SetType{ElemType: types.StringType}, + "include_consumer_groups": types.SetType{ElemType: types.StringType}, + "include_topics": types.SetType{ElemType: types.StringType}, + "kafka_custom_metrics": types.SetType{ElemType: types.StringType}, "max_jmx_metrics": types.Int64Type, - "opensearch": types.ListType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, - "redis": types.ListType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, + "opensearch": types.SetType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, + "redis": types.SetType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, } // tfoDatadogTags Datadog tag defined by user @@ -444,17 +444,17 @@ func flattenRedis(ctx context.Context, diags *diag.Diagnostics, o *dtoRedis) *tf var redisAttrs = map[string]attr.Type{"command_stats_enabled": types.BoolType} // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git 
a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go index bc0a75e75..c49842976 100644 --- a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go @@ -5,7 +5,7 @@ package externalawscloudwatchmetrics import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,11 +17,11 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "External AWS CloudWatch Metrics integration user config", NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{ - "dropped_metrics": resource.ListNestedBlock{ + "dropped_metrics": resource.SetNestedBlock{ Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "field": resource.StringAttribute{ @@ -33,9 +33,9 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "extra_metrics": resource.ListNestedBlock{ + "extra_metrics": resource.SetNestedBlock{ Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "field": resource.StringAttribute{ @@ -47,19 +47,19 @@ func NewResourceSchema() resource.ListNestedBlock { Required: true, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "External AWS CloudWatch Metrics integration user config", NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{ - "dropped_metrics": datasource.ListNestedBlock{ + "dropped_metrics": datasource.SetNestedBlock{ Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "field": datasource.StringAttribute{ @@ -71,9 +71,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Identifier of the metric.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, - "extra_metrics": datasource.ListNestedBlock{ + "extra_metrics": 
datasource.SetNestedBlock{ Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "field": datasource.StringAttribute{ @@ -85,17 +85,17 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Identifier of the metric.", }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1024)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig External AWS CloudWatch Metrics integration user config type tfoUserConfig struct { - DroppedMetrics types.List `tfsdk:"dropped_metrics"` - ExtraMetrics types.List `tfsdk:"extra_metrics"` + DroppedMetrics types.Set `tfsdk:"dropped_metrics"` + ExtraMetrics types.Set `tfsdk:"extra_metrics"` } // dtoUserConfig request/response object @@ -106,11 +106,11 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - droppedMetricsVar := schemautil.ExpandListNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) + droppedMetricsVar := schemautil.ExpandSetNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) if diags.HasError() { return nil } - extraMetricsVar := schemautil.ExpandListNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) + extraMetricsVar := schemautil.ExpandSetNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) if diags.HasError() { return nil } @@ -122,11 +122,11 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - droppedMetricsVar := schemautil.FlattenListNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) + droppedMetricsVar := schemautil.FlattenSetNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) if diags.HasError() { return nil } - extraMetricsVar := schemautil.FlattenListNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) + extraMetricsVar := schemautil.FlattenSetNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) if diags.HasError() { return nil } @@ -137,8 +137,8 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC } var userConfigAttrs = map[string]attr.Type{ - "dropped_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, - "extra_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, + "dropped_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, + "extra_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, } // tfoDroppedMetrics Metric name and subfield @@ -208,17 +208,17 @@ var extraMetricsAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - 
return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go index 7593af6cf..9da78cb31 100644 --- a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go @@ -5,7 +5,7 @@ package kafkaconnect import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,10 +17,10 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.SetNestedBlock{ Description: "Kafka Connect service configuration values", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "config_storage_topic": resource.StringAttribute{ @@ -45,15 +45,15 @@ func NewResourceSchema() resource.ListNestedBlock { }, }}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.SetNestedBlock{ Description: "Kafka Connect service configuration values", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "config_storage_topic": datasource.StringAttribute{ @@ -74,13 
+74,13 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }, }}, }}}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { - KafkaConnect types.List `tfsdk:"kafka_connect"` + KafkaConnect types.Set `tfsdk:"kafka_connect"` } // dtoUserConfig request/response object @@ -90,7 +90,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - kafkaConnectVar := schemautil.ExpandListBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) + kafkaConnectVar := schemautil.ExpandSetBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) if diags.HasError() { return nil } @@ -99,14 +99,14 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - kafkaConnectVar := schemautil.FlattenListBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) + kafkaConnectVar := schemautil.FlattenSetBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) if diags.HasError() { return nil } return &tfoUserConfig{KafkaConnect: kafkaConnectVar} } -var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} +var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} // tfoKafkaConnect Kafka Connect service configuration values type tfoKafkaConnect struct { @@ -152,17 +152,17 @@ var kafkaConnectAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go index b1c8cb565..9b42c841b 100644 --- a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go +++ 
b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go @@ -5,7 +5,7 @@ package kafkalogs import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,48 +17,48 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "kafka_topic": resource.StringAttribute{ Description: "Topic name.", Required: true, }, - "selected_log_fields": resource.ListAttribute{ + "selected_log_fields": resource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "kafka_topic": datasource.StringAttribute{ Computed: true, Description: "Topic name.", }, - "selected_log_fields": datasource.ListAttribute{ + "selected_log_fields": datasource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig type tfoUserConfig struct { KafkaTopic types.String `tfsdk:"kafka_topic"` - SelectedLogFields types.List `tfsdk:"selected_log_fields"` + SelectedLogFields types.Set `tfsdk:"selected_log_fields"` } // dtoUserConfig request/response object @@ -69,7 +69,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields) if diags.HasError() { return nil } @@ -81,7 +81,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields) diags.Append(d...) if diags.HasError() { return nil @@ -94,21 +94,21 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "kafka_topic": types.StringType, - "selected_log_fields": types.ListType{ElemType: types.StringType}, + "selected_log_fields": types.SetType{ElemType: types.StringType}, } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go index 548514588..de8db7181 100644 --- a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go +++ b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go @@ -5,7 +5,7 @@ package kafkamirrormaker import ( "context" - listvalidator 
"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,8 +17,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{"cluster_alias": resource.StringAttribute{ @@ -26,7 +26,7 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", Optional: true, }}, - Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.ListNestedBlock{ + Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.SetNestedBlock{ Description: "Kafka MirrorMaker configuration values", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "consumer_fetch_min_bytes": resource.Int64Attribute{ @@ -62,20 +62,20 @@ func NewResourceSchema() resource.ListNestedBlock { }}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{"cluster_alias": datasource.StringAttribute{ Computed: true, Description: "The alias under which the Kafka cluster is known to MirrorMaker. 
Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", }}, - Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.ListNestedBlock{ + Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.SetNestedBlock{ Description: "Kafka MirrorMaker configuration values", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "consumer_fetch_min_bytes": datasource.Int64Attribute{ @@ -105,14 +105,14 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // tfoUserConfig Integration user config type tfoUserConfig struct { ClusterAlias types.String `tfsdk:"cluster_alias"` - KafkaMirrormaker types.List `tfsdk:"kafka_mirrormaker"` + KafkaMirrormaker types.Set `tfsdk:"kafka_mirrormaker"` } // dtoUserConfig request/response object @@ -123,7 +123,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - kafkaMirrormakerVar := schemautil.ExpandListBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) + kafkaMirrormakerVar := schemautil.ExpandSetBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) if diags.HasError() { return nil } @@ -135,7 +135,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - kafkaMirrormakerVar := schemautil.FlattenListBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) + kafkaMirrormakerVar := schemautil.FlattenSetBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) if diags.HasError() { return nil } @@ -147,7 +147,7 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "cluster_alias": types.StringType, - "kafka_mirrormaker": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, + "kafka_mirrormaker": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, } // tfoKafkaMirrormaker Kafka MirrorMaker configuration values @@ -204,17 +204,17 @@ var kafkaMirrormakerAttrs = map[string]attr.Type{ } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return 
types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/logs/logs.go b/internal/plugin/service/userconfig/integration/logs/logs.go index 7a9be7d51..1916dcdcc 100644 --- a/internal/plugin/service/userconfig/integration/logs/logs.go +++ b/internal/plugin/service/userconfig/integration/logs/logs.go @@ -5,7 +5,7 @@ package logs import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -19,8 +19,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "elasticsearch_index_days_max": resource.Int64Attribute{ Computed: true, @@ -34,21 +34,21 @@ func NewResourceSchema() resource.ListNestedBlock { Description: "Elasticsearch index prefix. The default value is `logs`.", Optional: true, }, - "selected_log_fields": resource.ListAttribute{ + "selected_log_fields": resource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, Optional: true, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "elasticsearch_index_days_max": datasource.Int64Attribute{ Computed: true, @@ -58,14 +58,14 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Computed: true, Description: "Elasticsearch index prefix. The default value is `logs`.", }, - "selected_log_fields": datasource.ListAttribute{ + "selected_log_fields": datasource.SetAttribute{ Computed: true, Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", ElementType: types.StringType, - Validators: []validator.List{listvalidator.SizeAtMost(5)}, + Validators: []validator.Set{setvalidator.SizeAtMost(5)}, }, }}, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } @@ -73,7 +73,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { type tfoUserConfig struct { ElasticsearchIndexDaysMax types.Int64 `tfsdk:"elasticsearch_index_days_max"` ElasticsearchIndexPrefix types.String `tfsdk:"elasticsearch_index_prefix"` - SelectedLogFields types.List `tfsdk:"selected_log_fields"` + SelectedLogFields types.Set `tfsdk:"selected_log_fields"` } // dtoUserConfig request/response object @@ -85,7 +85,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields) if diags.HasError() { return nil } @@ -98,7 +98,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields) diags.Append(d...) if diags.HasError() { return nil @@ -113,21 +113,21 @@ func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserC var userConfigAttrs = map[string]attr.Type{ "elasticsearch_index_days_max": types.Int64Type, "elasticsearch_index_prefix": types.StringType, - "selected_log_fields": types.ListType{ElemType: types.StringType}, + "selected_log_fields": types.SetType{ElemType: types.StringType}, } // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics.go b/internal/plugin/service/userconfig/integration/metrics/metrics.go index 7670348fc..3fb712119 100644 --- a/internal/plugin/service/userconfig/integration/metrics/metrics.go +++ 
b/internal/plugin/service/userconfig/integration/metrics/metrics.go @@ -5,7 +5,7 @@ package metrics import ( "context" - listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" attr "github.com/hashicorp/terraform-plugin-framework/attr" datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" diag "github.com/hashicorp/terraform-plugin-framework/diag" @@ -17,8 +17,8 @@ import ( ) // NewResourceSchema returns resource schema -func NewResourceSchema() resource.ListNestedBlock { - return resource.ListNestedBlock{ +func NewResourceSchema() resource.SetNestedBlock { + return resource.SetNestedBlock{ Description: "Integration user config", NestedObject: resource.NestedBlockObject{ Attributes: map[string]resource.Attribute{ @@ -43,9 +43,9 @@ func NewResourceSchema() resource.ListNestedBlock { Optional: true, }, }, - Blocks: map[string]resource.Block{"source_mysql": resource.ListNestedBlock{ + Blocks: map[string]resource.Block{"source_mysql": resource.SetNestedBlock{ Description: "Configuration options for metrics where source service is MySQL", - NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.SetNestedBlock{ Description: "Configuration options for Telegraf MySQL input plugin", NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ "gather_event_waits": resource.BoolAttribute{ @@ -122,13 +122,13 @@ func NewResourceSchema() resource.ListNestedBlock { }}}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } // NewDataSourceSchema returns datasource schema -func NewDataSourceSchema() datasource.ListNestedBlock { - return datasource.ListNestedBlock{ +func NewDataSourceSchema() datasource.SetNestedBlock { + return datasource.SetNestedBlock{ Description: "Integration user config", NestedObject: datasource.NestedBlockObject{ Attributes: map[string]datasource.Attribute{ @@ -149,9 +149,9 @@ func NewDataSourceSchema() datasource.ListNestedBlock { Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. 
Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", }, }, - Blocks: map[string]datasource.Block{"source_mysql": datasource.ListNestedBlock{ + Blocks: map[string]datasource.Block{"source_mysql": datasource.SetNestedBlock{ Description: "Configuration options for metrics where source service is MySQL", - NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.SetNestedBlock{ Description: "Configuration options for Telegraf MySQL input plugin", NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ "gather_event_waits": datasource.BoolAttribute{ @@ -214,7 +214,7 @@ func NewDataSourceSchema() datasource.ListNestedBlock { }}}, }}, }, - Validators: []validator.List{listvalidator.SizeAtMost(1)}, + Validators: []validator.Set{setvalidator.SizeAtMost(1)}, } } @@ -223,7 +223,7 @@ type tfoUserConfig struct { Database types.String `tfsdk:"database"` RetentionDays types.Int64 `tfsdk:"retention_days"` RoUsername types.String `tfsdk:"ro_username"` - SourceMysql types.List `tfsdk:"source_mysql"` + SourceMysql types.Set `tfsdk:"source_mysql"` Username types.String `tfsdk:"username"` } @@ -238,7 +238,7 @@ type dtoUserConfig struct { // expandUserConfig expands tf object into dto object func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { - sourceMysqlVar := schemautil.ExpandListBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) + sourceMysqlVar := schemautil.ExpandSetBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) if diags.HasError() { return nil } @@ -253,7 +253,7 @@ func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserCo // flattenUserConfig flattens dto object into tf object func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { - sourceMysqlVar := schemautil.FlattenListBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) + sourceMysqlVar := schemautil.FlattenSetBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) if diags.HasError() { return nil } @@ -270,13 +270,13 @@ var userConfigAttrs = map[string]attr.Type{ "database": types.StringType, "retention_days": types.Int64Type, "ro_username": types.StringType, - "source_mysql": types.ListType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, + "source_mysql": types.SetType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, "username": types.StringType, } // tfoSourceMysql Configuration options for metrics where source service is MySQL type tfoSourceMysql struct { - Telegraf types.List `tfsdk:"telegraf"` + Telegraf types.Set `tfsdk:"telegraf"` } // dtoSourceMysql request/response object @@ -286,7 +286,7 @@ type dtoSourceMysql struct { // expandSourceMysql expands tf object into dto object func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourceMysql) *dtoSourceMysql { - telegrafVar := schemautil.ExpandListBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) + telegrafVar := schemautil.ExpandSetBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) if diags.HasError() { return nil } @@ -295,14 +295,14 @@ 
func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourc
 
 // flattenSourceMysql flattens dto object into tf object
 func flattenSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *dtoSourceMysql) *tfoSourceMysql {
-    telegrafVar := schemautil.FlattenListBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf)
+    telegrafVar := schemautil.FlattenSetBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf)
     if diags.HasError() {
         return nil
     }
     return &tfoSourceMysql{Telegraf: telegrafVar}
 }
 
-var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.ListType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}}
+var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.SetType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}}
 
 // tfoTelegraf Configuration options for Telegraf MySQL input plugin
 type tfoTelegraf struct {
@@ -398,17 +398,17 @@ var telegrafAttrs = map[string]attr.Type{
 }
 
 // Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig {
-    return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list)
+func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
+    return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
 }
 
 // Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List {
+func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
     o := new(dtoUserConfig)
     err := schemautil.MapToDTO(m, o)
     if err != nil {
         diags.AddError("failed to marshal map user config to dto", err.Error())
-        return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs})
+        return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
     }
-    return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
+    return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
 }
diff --git a/internal/schemautil/plugin.go b/internal/schemautil/plugin.go
index b06dd8635..6f94a84e5 100644
--- a/internal/schemautil/plugin.go
+++ b/internal/schemautil/plugin.go
@@ -11,7 +11,7 @@ import (
     "github.com/liip/sheriff"
 )
 
-func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types.List) (items []T) {
+func ExpandSet[T any](ctx context.Context, diags *diag.Diagnostics, list types.Set) (items []T) {
     if list.IsUnknown() || list.IsNull() {
         return nil
     }
@@ -21,8 +21,8 @@ func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types.
 type Expander[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K
 
-func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) []*K {
-    expanded := ExpandList[T](ctx, diags, list)
+func ExpandSetNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.Set) []*K {
+    expanded := ExpandSet[T](ctx, diags, list)
     if expanded == nil || diags.HasError() {
         return nil
     }
@@ -37,8 +37,8 @@ func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, ex
     return items
 }
 
-func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) *K {
-    items := ExpandListNested(ctx, diags, expand, list)
+func ExpandSetBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.Set) *K {
+    items := ExpandSetNested(ctx, diags, expand, list)
     if len(items) == 0 {
         return nil
     }
@@ -47,9 +47,9 @@ func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostic
 type Flattener[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K
 
-func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.List {
+func FlattenSetNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.Set {
     oType := types.ObjectType{AttrTypes: attrs}
-    empty := types.ListValueMust(oType, []attr.Value{})
+    empty := types.SetValueMust(oType, []attr.Value{})
     items := make([]*K, 0, len(list))
     for _, v := range list {
         items = append(items, flatten(ctx, diags, v))
@@ -58,7 +58,7 @@ func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, f
         }
     }
 
-    result, d := types.ListValueFrom(ctx, oType, items)
+    result, d := types.SetValueFrom(ctx, oType, items)
     diags.Append(d...)
if diags.HasError() { return empty @@ -66,11 +66,11 @@ func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, f return result } -func FlattenListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.List { +func FlattenSetBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.Set { if o == nil { - return types.ListValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) + return types.SetValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) } - return FlattenListNested(ctx, diags, flatten, attrs, []*T{o}) + return FlattenSetNested(ctx, diags, flatten, attrs, []*T{o}) } // marshalUserConfig converts user config into json diff --git a/internal/sdkprovider/service/serviceintegration/service_integration.go b/internal/sdkprovider/service/serviceintegration/service_integration.go deleted file mode 100644 index 72a4e7042..000000000 --- a/internal/sdkprovider/service/serviceintegration/service_integration.go +++ /dev/null @@ -1,398 +0,0 @@ -package serviceintegration - -import ( - "context" - "fmt" - "log" - "regexp" - "time" - - "github.com/aiven/aiven-go-client/v2" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist" - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader" -) - -const serviceIntegrationEndpointRegExp = "^[a-zA-Z0-9_-]*\\/{1}[a-zA-Z0-9_-]*$" - -var integrationTypes = []string{ - "alertmanager", - "cassandra_cross_service_cluster", - "clickhouse_kafka", - "clickhouse_postgresql", - "dashboard", - "datadog", - "datasource", - "external_aws_cloudwatch_logs", - "external_aws_cloudwatch_metrics", - "external_elasticsearch_logs", - "external_google_cloud_logging", - "external_opensearch_logs", - "flink", - "internal_connectivity", - "jolokia", - "kafka_connect", - "kafka_logs", - "kafka_mirrormaker", - "logs", - "m3aggregator", - "m3coordinator", - "metrics", - "opensearch_cross_cluster_replication", - "opensearch_cross_cluster_search", - "prometheus", - "read_replica", - "rsyslog", - "schema_registry_proxy", -} - -var aivenServiceIntegrationSchema = map[string]*schema.Schema{ - "integration_id": { - Description: "Service Integration Id at aiven", - Computed: true, - Type: schema.TypeString, - }, - "destination_endpoint_id": { - Description: "Destination endpoint for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp), - "endpoint id should have the following format: project_name/endpoint_id"), - }, - "destination_service_name": { - Description: "Destination service for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "integration_type": { - Description: "Type of the service integration. 
Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"), - ForceNew: true, - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(integrationTypes, false), - }, - "project": { - Description: "Project the integration belongs to", - ForceNew: true, - Required: true, - Type: schema.TypeString, - }, - "source_endpoint_id": { - Description: "Source endpoint for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp), - "endpoint id should have the following format: project_name/endpoint_id"), - }, - "source_service_name": { - Description: "Source service for the integration (if any)", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "logs_user_config": dist.IntegrationTypeLogs(), - "kafka_mirrormaker_user_config": dist.IntegrationTypeKafkaMirrormaker(), - "kafka_connect_user_config": dist.IntegrationTypeKafkaConnect(), - "kafka_logs_user_config": dist.IntegrationTypeKafkaLogs(), - "metrics_user_config": dist.IntegrationTypeMetrics(), - "datadog_user_config": dist.IntegrationTypeDatadog(), - "clickhouse_kafka_user_config": dist.IntegrationTypeClickhouseKafka(), - "clickhouse_postgresql_user_config": dist.IntegrationTypeClickhousePostgresql(), - "external_aws_cloudwatch_metrics_user_config": dist.IntegrationTypeExternalAwsCloudwatchMetrics(), -} - -func ResourceServiceIntegration() *schema.Resource { - return &schema.Resource{ - Description: "The Service Integration resource allows the creation and management of Aiven Service Integrations.", - CreateContext: resourceServiceIntegrationCreate, - ReadContext: resourceServiceIntegrationRead, - UpdateContext: resourceServiceIntegrationUpdate, - DeleteContext: resourceServiceIntegrationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: schemautil.DefaultResourceTimeouts(), - - Schema: aivenServiceIntegrationSchema, - SchemaVersion: 1, - StateUpgraders: stateupgrader.ServiceIntegration(), - } -} - -func plainEndpointID(fullEndpointID *string) *string { - if fullEndpointID == nil { - return nil - } - _, endpointID, err := schemautil.SplitResourceID2(*fullEndpointID) - if err != nil { - return nil - } - return &endpointID -} - -func resourceServiceIntegrationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - - // read_replicas can be only be created alongside the service. also the only way to promote the replica - // is to delete the service integration that was created so we should make it least painful to do so. 
- // for now we support to seemlessly import preexisting 'read_replica' service integrations in the resource create - // all other integrations should be imported using `terraform import` - if integrationType == "read_replica" { - if preexisting, err := resourceServiceIntegrationCheckForPreexistingResource(ctx, d, m); err != nil { - return diag.Errorf("unable to search for possible preexisting 'read_replica' service integration: %s", err) - } else if preexisting != nil { - d.SetId(schemautil.BuildResourceID(projectName, preexisting.ServiceIntegrationID)) - return resourceServiceIntegrationRead(ctx, d, m) - } - } - - uc, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d) - if err != nil { - return diag.FromErr(err) - } - - integration, err := client.ServiceIntegrations.Create( - ctx, - projectName, - aiven.CreateServiceIntegrationRequest{ - DestinationEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "destination_endpoint_id")), - DestinationService: schemautil.OptionalStringPointer(d, "destination_service_name"), - IntegrationType: integrationType, - SourceEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "source_endpoint_id")), - SourceService: schemautil.OptionalStringPointer(d, "source_service_name"), - UserConfig: uc, - }, - ) - if err != nil { - return diag.Errorf("error creating serivce integration: %s", err) - } - d.SetId(schemautil.BuildResourceID(projectName, integration.ServiceIntegrationID)) - - if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil { - return diag.Errorf("unable to wait for service integration to become active: %s", err) - } - return resourceServiceIntegrationRead(ctx, d, m) -} - -func resourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - integration, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID) - if err != nil { - err = schemautil.ResourceReadHandleNotFound(err, d) - if err != nil { - return diag.Errorf("cannot get service integration: %s; id: %s", err, integrationID) - } - return nil - } - - if err = resourceServiceIntegrationCopyAPIResponseToTerraform(d, integration, projectName); err != nil { - return diag.Errorf("cannot copy api response into terraform schema: %s", err) - } - - return nil -} - -func resourceServiceIntegrationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - userConfig, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d) - if err != nil { - return diag.FromErr(err) - } - - if userConfig == nil { - // Required by API - userConfig = make(map[string]interface{}) - } - - _, err = client.ServiceIntegrations.Update( - ctx, - projectName, - integrationID, - aiven.UpdateServiceIntegrationRequest{ - UserConfig: userConfig, - }, - ) - if err != nil { - return diag.Errorf("unable to update service integration: %s", err) - } - if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil { - return diag.Errorf("unable to wait for service integration to become active: %s", err) - } - - return resourceServiceIntegrationRead(ctx, d, m) -} - -func resourceServiceIntegrationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) 
diag.Diagnostics { - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return diag.FromErr(err) - } - - err = client.ServiceIntegrations.Delete(ctx, projectName, integrationID) - if err != nil && !aiven.IsNotFound(err) { - return diag.Errorf("cannot delete service integration: %s", err) - } - - return nil -} - -func resourceServiceIntegrationCheckForPreexistingResource(ctx context.Context, d *schema.ResourceData, m interface{}) (*aiven.ServiceIntegration, error) { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - sourceServiceName := d.Get("source_service_name").(string) - destinationServiceName := d.Get("destination_service_name").(string) - - integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName) - if err != nil && !aiven.IsNotFound(err) { - return nil, fmt.Errorf("unable to get list of service integrations: %s", err) - } - - for i := range integrations { - integration := integrations[i] - if integration.SourceService == nil || integration.DestinationService == nil || integration.ServiceIntegrationID == "" { - continue - } - - if integration.IntegrationType == integrationType && - *integration.SourceService == sourceServiceName && - *integration.DestinationService == destinationServiceName { - return integration, nil - } - } - return nil, nil -} - -// nolint:staticcheck // TODO: Migrate to helper/retry package to avoid deprecated resource.StateRefreshFunc. -func resourceServiceIntegrationWaitUntilActive(ctx context.Context, d *schema.ResourceData, m interface{}) error { - const ( - active = "ACTIVE" - notActive = "NOTACTIVE" - ) - client := m.(*aiven.Client) - - projectName, integrationID, err := schemautil.SplitResourceID2(d.Id()) - if err != nil { - return err - } - - stateChangeConf := &resource.StateChangeConf{ - Pending: []string{notActive}, - Target: []string{active}, - Refresh: func() (interface{}, string, error) { - log.Println("[DEBUG] Service Integration: waiting until active") - - ii, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID) - if err != nil { - // Sometimes Aiven API retrieves 404 error even when a successful service integration is created - if aiven.IsNotFound(err) { - log.Println("[DEBUG] Service Integration: not yet found") - return nil, notActive, nil - } - return nil, "", err - } - if !ii.Active { - log.Println("[DEBUG] Service Integration: not yet active") - return nil, notActive, nil - } - - if ii.IntegrationType == "kafka_connect" && ii.DestinationService != nil { - if _, err := client.KafkaConnectors.List(ctx, projectName, *ii.DestinationService); err != nil { - log.Println("[DEBUG] Service Integration: error listing kafka connectors: ", err) - return nil, notActive, nil - } - } - return ii, active, nil - }, - Delay: 2 * time.Second, - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 2 * time.Second, - ContinuousTargetOccurence: 10, - } - if _, err := stateChangeConf.WaitForStateContext(ctx); err != nil { - return err - } - return nil -} - -func resourceServiceIntegrationUserConfigFromSchemaToAPI(d *schema.ResourceData) (map[string]interface{}, error) { - integrationType := d.Get("integration_type").(string) - return apiconvert.ToAPI(userconfig.IntegrationTypes, integrationType, d) -} - -func resourceServiceIntegrationCopyAPIResponseToTerraform( - d *schema.ResourceData, - integration *aiven.ServiceIntegration, - project string, -) error { - if 
err := d.Set("project", project); err != nil { - return err - } - - if integration.DestinationEndpointID != nil { - if err := d.Set("destination_endpoint_id", schemautil.BuildResourceID(project, *integration.DestinationEndpointID)); err != nil { - return err - } - } else if integration.DestinationService != nil { - if err := d.Set("destination_service_name", *integration.DestinationService); err != nil { - return err - } - } - if integration.SourceEndpointID != nil { - if err := d.Set("source_endpoint_id", schemautil.BuildResourceID(project, *integration.SourceEndpointID)); err != nil { - return err - } - } else if integration.SourceService != nil { - if err := d.Set("source_service_name", *integration.SourceService); err != nil { - return err - } - } - if err := d.Set("integration_id", integration.ServiceIntegrationID); err != nil { - return err - } - integrationType := integration.IntegrationType - if err := d.Set("integration_type", integrationType); err != nil { - return err - } - - userConfig, err := apiconvert.FromAPI(userconfig.IntegrationTypes, integrationType, integration.UserConfig) - if err != nil { - return err - } - - if len(userConfig) > 0 { - if err := d.Set(integrationType+"_user_config", userConfig); err != nil { - return err - } - } - - return nil -} diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go deleted file mode 100644 index 014bcc8da..000000000 --- a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go +++ /dev/null @@ -1,51 +0,0 @@ -package serviceintegration - -import ( - "context" - - "github.com/aiven/aiven-go-client/v2" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) - -func DatasourceServiceIntegration() *schema.Resource { - return &schema.Resource{ - ReadContext: datasourceServiceIntegrationRead, - Description: "The Service Integration data source provides information about the existing Aiven Service Integration.", - Schema: schemautil.ResourceSchemaAsDatasourceSchema(aivenServiceIntegrationSchema, - "project", "integration_type", "source_service_name", "destination_service_name"), - } -} - -func datasourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*aiven.Client) - - projectName := d.Get("project").(string) - integrationType := d.Get("integration_type").(string) - sourceServiceName := d.Get("source_service_name").(string) - destinationServiceName := d.Get("destination_service_name").(string) - - integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName) - if err != nil { - return diag.Errorf("unable to list integrations for %s/%s: %s", projectName, sourceServiceName, err) - } - - for _, i := range integrations { - if i.SourceService == nil || i.DestinationService == nil { - continue - } - - if i.IntegrationType == integrationType && - *i.SourceService == sourceServiceName && - *i.DestinationService == destinationServiceName { - - d.SetId(schemautil.BuildResourceID(projectName, i.ServiceIntegrationID)) - return resourceServiceIntegrationRead(ctx, d, m) - } - } - - return diag.Errorf("common integration %s/%s/%s/%s not found", - projectName, integrationType, sourceServiceName, destinationServiceName) -} diff --git a/ucgenerator/main.go b/ucgenerator/main.go index 
4cb3aa912..ef084010b 100644 --- a/ucgenerator/main.go +++ b/ucgenerator/main.go @@ -26,7 +26,7 @@ const ( importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema" importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - importListvalidator = "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + importSetValidator = "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator" codeGenerated = "Code generated by user config generator. DO NOT EDIT." ) @@ -143,19 +143,19 @@ func genAllForObject(f *jen.File, o *object) { // Exports handy public functions for root object only f.Op(` // Expand public function that converts tf object into dto -func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { - return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig { + return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set) } // Flatten public function that converts dto into tf object -func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set { o := new(dtoUserConfig) err := schemautil.MapToDTO(m, o) if err != nil { diags.AddError("failed to marshal map user config to dto", err.Error()) - return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs}) } - return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) + return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) } `) } @@ -169,7 +169,7 @@ func genExpander(f *jen.File, o *object) { switch p.Type { case objectTypeObject: value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("expand"+p.camelName), @@ -180,7 +180,7 @@ func genExpander(f *jen.File, o *object) { value = jen.Op(p.varName) if p.ArrayItems.Type == objectTypeObject { // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("expand"+p.camelName), @@ -191,7 +191,7 @@ func genExpander(f *jen.File, o *object) { // It is a list of scalars // We don't want pointer scalars here t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "") - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandList").Types(jen.Id(t)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSet").Types(jen.Id(t)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("o").Dot(p.camelName), @@ -234,7 +234,7 @@ func genFlattener(f 
*jen.File, o *object) { switch p.Type { case objectTypeObject: value = jen.Op(p.varName) - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("flatten"+p.camelName), @@ -246,7 +246,7 @@ func genFlattener(f *jen.File, o *object) { value = jen.Op(p.varName) if p.ArrayItems.Type == objectTypeObject { // It is a list of objects - v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( jen.Id("ctx"), jen.Id("diags"), jen.Id("flatten"+p.camelName), @@ -256,7 +256,7 @@ func genFlattener(f *jen.File, o *object) { body = append(body, v, ifErr()) } else { //It is a list of scalars - v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "ListValueFrom").Call( + v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "SetValueFrom").Call( jen.Id("ctx"), jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"), jen.Id("o").Dot(p.camelName), @@ -309,7 +309,7 @@ func genAttrsMap(f *jen.File, o *object) { } else { v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") } - values[key] = jen.Qual(importTypes, "ListType").Values(jen.Dict{jen.Id("ElemType"): v}) + values[key] = jen.Qual(importTypes, "SetType").Values(jen.Dict{jen.Id("ElemType"): v}) default: values[key] = jen.Qual(importTypes, getTFType(p)+"Type") } @@ -352,7 +352,7 @@ func genSchema(f *jen.File, o *object, name, pkg string) { funcName := fmt.Sprintf("New%sSchema", name) f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name))) - f.Func().Id(funcName).Params().Qual(pkg, "ListNestedBlock").Block( + f.Func().Id(funcName).Params().Qual(pkg, "SetNestedBlock").Block( jen.Return(getSchemaAttributes(o, pkg)), ) } @@ -400,7 +400,7 @@ func getSchemaAttributes(o *object, pkg string) jen.Code { values := getSchemaAttributeValues(o, isResource) values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested) - return jen.Qual(pkg, "ListNestedBlock").Values(values) + return jen.Qual(pkg, "SetNestedBlock").Values(values) } func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { @@ -441,7 +441,7 @@ func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { } if len(validators) > 0 { - a[jen.Id("Validators")] = valValidatorList(validators...) + a[jen.Id("Validators")] = valValidatorSet(validators...) } return a @@ -452,11 +452,11 @@ func getTFType(o *object) string { switch o.Type { case objectTypeObject: if o.isNestedBlock() { - return "List" + return "Set" } return "Map" case objectTypeArray: - return "List" + return "Set" case objectTypeString: return "String" case objectTypeBoolean: @@ -573,7 +573,7 @@ func addDot(s string) string { } func getValidator(name string, v any) *jen.Statement { - return jen.Qual(importListvalidator, name).Call(jen.Lit(v)) + return jen.Qual(importSetValidator, name).Call(jen.Lit(v)) } func valSizeAtLeast(n int) *jen.Statement { @@ -584,8 +584,8 @@ func valSizeAtMost(n int) *jen.Statement { return getValidator("SizeAtMost", n) } -func valValidatorList(c ...jen.Code) *jen.Statement { - return jen.Index().Qual(importValidator, "List").Values(c...) 
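To make the generated flattening concrete: for scalar arrays the generator now emits types.SetValueFrom instead of types.ListValueFrom. A minimal, self-contained sketch of that round trip using only public plugin-framework calls (the surrounding main function and sample values are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	ctx := context.Background()

	// Flatten direction: a Go slice becomes a types.Set, as in the generated flatteners.
	set, diags := types.SetValueFrom(ctx, types.StringType, []string{"a", "b"})
	if diags.HasError() {
		panic(diags)
	}

	// Expand direction: a types.Set is read back into a Go slice.
	var out []string
	diags = set.ElementsAs(ctx, &out, false)
	if diags.HasError() {
		panic(diags)
	}
	fmt.Println(out) // prints the two elements, e.g. [a b]
}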
+func valValidatorSet(c ...jen.Code) *jen.Statement { + return jen.Index().Qual(importValidator, "Set").Values(c...) } func ifErr() *jen.Statement {
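Taken together, these generator changes mean each nested user-config block is now emitted as a SetNestedBlock guarded by set validators rather than a ListNestedBlock with list validators. A hand-written sketch of roughly that output shape (the block description, attribute name, and size limit here are illustrative only):

package example

import (
	"github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
)

// NewExampleUserConfigSchema shows the general shape of a generated
// user-config block after the list-to-set migration.
func NewExampleUserConfigSchema() schema.SetNestedBlock {
	return schema.SetNestedBlock{
		Description: "Integration user config",
		NestedObject: schema.NestedBlockObject{
			Attributes: map[string]schema.Attribute{
				"example_option": schema.StringAttribute{
					Description: "An illustrative scalar option.",
					Optional:    true,
				},
			},
		},
		Validators: []validator.Set{
			setvalidator.SizeAtMost(1),
		},
	}
}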