diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7def09051..d1f74ca33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,13 +6,9 @@ nav_order: 1
# Changelog
-## [5.0.0] - YYYY-MM-DD
-
-- Migrate `aiven_service_integration` to the Plugin Framework
-- Deprecating `project_user`, `account_team` and `account_team_member` resources
-
## [X.Y.Z] - YYYY-MM-DD
+- Deprecate `project_user`, `account_team` and `account_team_member` resources
- Fix unmarshalling empty userconfig crash
## [4.9.3] - 2023-10-27
diff --git a/docs/data-sources/service_integration.md b/docs/data-sources/service_integration.md
index c5a1bf004..354e8152b 100644
--- a/docs/data-sources/service_integration.md
+++ b/docs/data-sources/service_integration.md
@@ -30,248 +30,248 @@ data "aiven_service_integration" "myintegration" {
### Read-Only
-- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config))
-- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config))
-- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config))
+- `clickhouse_kafka_user_config` (List of Object) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedatt--clickhouse_kafka_user_config))
+- `clickhouse_postgresql_user_config` (List of Object) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedatt--clickhouse_postgresql_user_config))
+- `datadog_user_config` (List of Object) Datadog user configurable settings (see [below for nested schema](#nestedatt--datadog_user_config))
- `destination_endpoint_id` (String) Destination endpoint for the integration (if any)
-- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config))
+- `external_aws_cloudwatch_metrics_user_config` (List of Object) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedatt--external_aws_cloudwatch_metrics_user_config))
- `id` (String) The ID of this resource.
- `integration_id` (String) Service Integration Id at aiven
-- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config))
-- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config))
-- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config))
-- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config))
-- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config))
+- `kafka_connect_user_config` (List of Object) KafkaConnect user configurable settings (see [below for nested schema](#nestedatt--kafka_connect_user_config))
+- `kafka_logs_user_config` (List of Object) KafkaLogs user configurable settings (see [below for nested schema](#nestedatt--kafka_logs_user_config))
+- `kafka_mirrormaker_user_config` (List of Object) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedatt--kafka_mirrormaker_user_config))
+- `logs_user_config` (List of Object) Logs user configurable settings (see [below for nested schema](#nestedatt--logs_user_config))
+- `metrics_user_config` (List of Object) Metrics user configurable settings (see [below for nested schema](#nestedatt--metrics_user_config))
- `source_endpoint_id` (String) Source endpoint for the integration (if any)
-
+
### Nested Schema for `clickhouse_kafka_user_config`
Read-Only:
-- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables))
+- `tables` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables))
-
+
### Nested Schema for `clickhouse_kafka_user_config.tables`
Read-Only:
-- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
-- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
-- `data_format` (String) Message data format. The default value is `JSONEachRow`.
-- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`.
-- `group_name` (String) Kafka consumers group. The default value is `clickhouse`.
-- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`.
-- `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
-- `max_rows_per_message` (Number) The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
-- `name` (String) Name of the table.
-- `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`.
-- `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.
-- `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.
-- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
-
-
+- `auto_offset_reset` (String)
+- `columns` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--columns))
+- `data_format` (String)
+- `date_time_input_format` (String)
+- `group_name` (String)
+- `handle_error_mode` (String)
+- `max_block_size` (Number)
+- `max_rows_per_message` (Number)
+- `name` (String)
+- `num_consumers` (Number)
+- `poll_max_batch_size` (Number)
+- `skip_broken_messages` (Number)
+- `topics` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--topics))
+
+
### Nested Schema for `clickhouse_kafka_user_config.tables.columns`
Read-Only:
-- `name` (String) Column name.
-- `type` (String) Column type.
+- `name` (String)
+- `type` (String)
-
+
### Nested Schema for `clickhouse_kafka_user_config.tables.topics`
Read-Only:
-- `name` (String) Name of the topic.
+- `name` (String)
-
+
### Nested Schema for `clickhouse_postgresql_user_config`
Read-Only:
-- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases))
+- `databases` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_postgresql_user_config--databases))
-
+
### Nested Schema for `clickhouse_postgresql_user_config.databases`
Read-Only:
-- `database` (String) PostgreSQL database to expose. The default value is `defaultdb`.
-- `schema` (String) PostgreSQL schema to expose. The default value is `public`.
+- `database` (String)
+- `schema` (String)
-
+
### Nested Schema for `datadog_user_config`
Read-Only:
-- `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring.
-- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags))
-- `exclude_consumer_groups` (Set of String) List of custom metrics.
-- `exclude_topics` (Set of String) List of topics to exclude.
-- `include_consumer_groups` (Set of String) List of custom metrics.
-- `include_topics` (Set of String) List of topics to include.
-- `kafka_custom_metrics` (Set of String) List of custom metrics.
-- `max_jmx_metrics` (Number) Maximum number of JMX metrics to send.
-- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch))
-- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis))
-
-
+- `datadog_dbm_enabled` (Boolean)
+- `datadog_tags` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--datadog_tags))
+- `exclude_consumer_groups` (List of String)
+- `exclude_topics` (List of String)
+- `include_consumer_groups` (List of String)
+- `include_topics` (List of String)
+- `kafka_custom_metrics` (List of String)
+- `max_jmx_metrics` (Number)
+- `opensearch` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--opensearch))
+- `redis` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--redis))
+
+
### Nested Schema for `datadog_user_config.datadog_tags`
Read-Only:
-- `comment` (String) Optional tag explanation.
-- `tag` (String) Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.
+- `comment` (String)
+- `tag` (String)
-
+
### Nested Schema for `datadog_user_config.opensearch`
Read-Only:
-- `index_stats_enabled` (Boolean) Enable Datadog Opensearch Index Monitoring.
-- `pending_task_stats_enabled` (Boolean) Enable Datadog Opensearch Pending Task Monitoring.
-- `pshard_stats_enabled` (Boolean) Enable Datadog Opensearch Primary Shard Monitoring.
+- `index_stats_enabled` (Boolean)
+- `pending_task_stats_enabled` (Boolean)
+- `pshard_stats_enabled` (Boolean)
-
+
### Nested Schema for `datadog_user_config.redis`
Read-Only:
-- `command_stats_enabled` (Boolean) Enable command_stats option in the agent's configuration. The default value is `false`.
+- `command_stats_enabled` (Boolean)
-
+
### Nested Schema for `external_aws_cloudwatch_metrics_user_config`
Read-Only:
-- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
-- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics))
+- `dropped_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
+- `extra_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--extra_metrics))
-
+
### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics`
Read-Only:
-- `field` (String) Identifier of a value in the metric.
-- `metric` (String) Identifier of the metric.
+- `field` (String)
+- `metric` (String)
-
+
### Nested Schema for `external_aws_cloudwatch_metrics_user_config.extra_metrics`
Read-Only:
-- `field` (String) Identifier of a value in the metric.
-- `metric` (String) Identifier of the metric.
+- `field` (String)
+- `metric` (String)
-
+
### Nested Schema for `kafka_connect_user_config`
Read-Only:
-- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
+- `kafka_connect` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--kafka_connect))
-
+
### Nested Schema for `kafka_connect_user_config.kafka_connect`
Read-Only:
-- `config_storage_topic` (String) The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.
-- `group_id` (String) A unique string that identifies the Connect cluster group this worker belongs to.
-- `offset_storage_topic` (String) The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.
-- `status_storage_topic` (String) The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.
+- `config_storage_topic` (String)
+- `group_id` (String)
+- `offset_storage_topic` (String)
+- `status_storage_topic` (String)
-
+
### Nested Schema for `kafka_logs_user_config`
Read-Only:
-- `kafka_topic` (String) Topic name.
-- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
+- `kafka_topic` (String)
+- `selected_log_fields` (List of String)
-
+
### Nested Schema for `kafka_mirrormaker_user_config`
Read-Only:
-- `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.
-- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
+- `cluster_alias` (String)
+- `kafka_mirrormaker` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--kafka_mirrormaker))
-
+
### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker`
Read-Only:
-- `consumer_fetch_min_bytes` (Number) The minimum amount of data the server should return for a fetch request.
-- `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker.
-- `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker.
-- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
-- `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing.
-- `producer_max_request_size` (Number) The maximum request size in bytes.
+- `consumer_fetch_min_bytes` (Number)
+- `producer_batch_size` (Number)
+- `producer_buffer_memory` (Number)
+- `producer_compression_type` (String)
+- `producer_linger_ms` (Number)
+- `producer_max_request_size` (Number)
-
+
### Nested Schema for `logs_user_config`
Read-Only:
-- `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`.
-- `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`.
-- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
+- `elasticsearch_index_days_max` (Number)
+- `elasticsearch_index_prefix` (String)
+- `selected_log_fields` (List of String)
-
+
### Nested Schema for `metrics_user_config`
Read-Only:
-- `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
-- `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
-- `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
-- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql))
-- `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
+- `database` (String)
+- `retention_days` (Number)
+- `ro_username` (String)
+- `source_mysql` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql))
+- `username` (String)
-
+
### Nested Schema for `metrics_user_config.source_mysql`
Read-Only:
-- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf))
+- `telegraf` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql--telegraf))
-
+
### Nested Schema for `metrics_user_config.source_mysql.telegraf`
Read-Only:
-- `gather_event_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.
-- `gather_file_events_stats` (Boolean) gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.
-- `gather_index_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.
-- `gather_info_schema_auto_inc` (Boolean) Gather auto_increment columns and max values from information schema.
-- `gather_innodb_metrics` (Boolean) Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.
-- `gather_perf_events_statements` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.
-- `gather_process_list` (Boolean) Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.
-- `gather_slave_status` (Boolean) Gather metrics from SHOW SLAVE STATUS command output.
-- `gather_table_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.
-- `gather_table_lock_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.
-- `gather_table_schema` (Boolean) Gather metrics from INFORMATION_SCHEMA.TABLES.
-- `perf_events_statements_digest_text_limit` (Number) Truncates digest text from perf_events_statements into this many characters.
-- `perf_events_statements_limit` (Number) Limits metrics from perf_events_statements.
-- `perf_events_statements_time_limit` (Number) Only include perf_events_statements whose last seen is less than this many seconds.
+- `gather_event_waits` (Boolean)
+- `gather_file_events_stats` (Boolean)
+- `gather_index_io_waits` (Boolean)
+- `gather_info_schema_auto_inc` (Boolean)
+- `gather_innodb_metrics` (Boolean)
+- `gather_perf_events_statements` (Boolean)
+- `gather_process_list` (Boolean)
+- `gather_slave_status` (Boolean)
+- `gather_table_io_waits` (Boolean)
+- `gather_table_lock_waits` (Boolean)
+- `gather_table_schema` (Boolean)
+- `perf_events_statements_digest_text_limit` (Number)
+- `perf_events_statements_limit` (Number)
+- `perf_events_statements_time_limit` (Number)
diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md
index b82e11a9d..e8bef4aed 100644
--- a/docs/resources/service_integration.md
+++ b/docs/resources/service_integration.md
@@ -33,17 +33,17 @@ resource "aiven_service_integration" "my_integration_metrics" {
### Optional
-- `clickhouse_kafka_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config))
-- `clickhouse_postgresql_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config))
-- `datadog_user_config` (Block Set) (see [below for nested schema](#nestedblock--datadog_user_config))
+- `clickhouse_kafka_user_config` (Block List, Max: 1) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config))
+- `clickhouse_postgresql_user_config` (Block List, Max: 1) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config))
+- `datadog_user_config` (Block List, Max: 1) Datadog user configurable settings (see [below for nested schema](#nestedblock--datadog_user_config))
- `destination_endpoint_id` (String) Destination endpoint for the integration (if any)
- `destination_service_name` (String) Destination service for the integration (if any)
-- `external_aws_cloudwatch_metrics_user_config` (Block Set) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config))
-- `kafka_connect_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config))
-- `kafka_logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--kafka_logs_user_config))
-- `kafka_mirrormaker_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config))
-- `logs_user_config` (Block Set) (see [below for nested schema](#nestedblock--logs_user_config))
-- `metrics_user_config` (Block Set) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config))
+- `external_aws_cloudwatch_metrics_user_config` (Block List, Max: 1) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config))
+- `kafka_connect_user_config` (Block List, Max: 1) KafkaConnect user configurable settings (see [below for nested schema](#nestedblock--kafka_connect_user_config))
+- `kafka_logs_user_config` (Block List, Max: 1) KafkaLogs user configurable settings (see [below for nested schema](#nestedblock--kafka_logs_user_config))
+- `kafka_mirrormaker_user_config` (Block List, Max: 1) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config))
+- `logs_user_config` (Block List, Max: 1) Logs user configurable settings (see [below for nested schema](#nestedblock--logs_user_config))
+- `metrics_user_config` (Block List, Max: 1) Metrics user configurable settings (see [below for nested schema](#nestedblock--metrics_user_config))
- `source_endpoint_id` (String) Source endpoint for the integration (if any)
- `source_service_name` (String) Source service for the integration (if any)
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
@@ -58,7 +58,7 @@ resource "aiven_service_integration" "my_integration_metrics" {
Optional:
-- `tables` (Block Set) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables))
+- `tables` (Block List, Max: 100) Tables to create. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables))
### Nested Schema for `clickhouse_kafka_user_config.tables`
@@ -72,7 +72,7 @@ Required:
Optional:
- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
-- `columns` (Block Set) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
+- `columns` (Block List, Max: 100) Table columns. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`.
- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`.
- `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
@@ -80,7 +80,7 @@ Optional:
- `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`.
- `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.
- `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.
-- `topics` (Block Set) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
+- `topics` (Block List, Max: 100) Kafka topics. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
### Nested Schema for `clickhouse_kafka_user_config.tables.columns`
@@ -106,7 +106,7 @@ Required:
Optional:
-- `databases` (Block Set) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases))
+- `databases` (Block List, Max: 10) Databases to expose. (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases))
### Nested Schema for `clickhouse_postgresql_user_config.databases`
@@ -124,15 +124,15 @@ Optional:
Optional:
- `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring.
-- `datadog_tags` (Block Set) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags))
-- `exclude_consumer_groups` (Set of String) List of custom metrics.
-- `exclude_topics` (Set of String) List of topics to exclude.
-- `include_consumer_groups` (Set of String) List of custom metrics.
-- `include_topics` (Set of String) List of topics to include.
-- `kafka_custom_metrics` (Set of String) List of custom metrics.
+- `datadog_tags` (Block List, Max: 32) Custom tags provided by user. (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags))
+- `exclude_consumer_groups` (List of String) List of custom metrics.
+- `exclude_topics` (List of String) List of topics to exclude.
+- `include_consumer_groups` (List of String) List of custom metrics.
+- `include_topics` (List of String) List of topics to include.
+- `kafka_custom_metrics` (List of String) List of custom metrics.
- `max_jmx_metrics` (Number) Maximum number of JMX metrics to send.
-- `opensearch` (Block Set) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch))
-- `redis` (Block Set) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis))
+- `opensearch` (Block List, Max: 1) Datadog Opensearch Options. (see [below for nested schema](#nestedblock--datadog_user_config--opensearch))
+- `redis` (Block List, Max: 1) Datadog Redis Options. (see [below for nested schema](#nestedblock--datadog_user_config--redis))
### Nested Schema for `datadog_user_config.datadog_tags`
@@ -170,8 +170,8 @@ Optional:
Optional:
-- `dropped_metrics` (Block Set) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
-- `extra_metrics` (Block Set) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics))
+- `dropped_metrics` (Block List, Max: 1024) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
+- `extra_metrics` (Block List, Max: 1024) Metrics to allow through to AWS CloudWatch (in addition to default metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics))
### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics`
@@ -197,7 +197,7 @@ Required:
Optional:
-- `kafka_connect` (Block Set) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
+- `kafka_connect` (Block List, Max: 1) Kafka Connect service configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
### Nested Schema for `kafka_connect_user_config.kafka_connect`
@@ -220,7 +220,7 @@ Required:
Optional:
-- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
+- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
@@ -229,7 +229,7 @@ Optional:
Optional:
- `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.
-- `kafka_mirrormaker` (Block Set) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
+- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker`
@@ -252,7 +252,7 @@ Optional:
- `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`.
- `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`.
-- `selected_log_fields` (Set of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
+- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.
@@ -263,7 +263,7 @@ Optional:
- `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
-- `source_mysql` (Block Set) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql))
+- `source_mysql` (Block List, Max: 1) Configuration options for metrics where source service is MySQL. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql))
- `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
@@ -271,7 +271,7 @@ Optional:
Optional:
-- `telegraf` (Block Set) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf))
+- `telegraf` (Block List, Max: 1) Configuration options for Telegraf MySQL input plugin. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf))
### Nested Schema for `metrics_user_config.source_mysql.telegraf`
@@ -301,10 +301,11 @@ Optional:
Optional:
-- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
-- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs.
-- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled.
-- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours).
+- `create` (String)
+- `default` (String)
+- `delete` (String)
+- `read` (String)
+- `update` (String)
## Import
Import is supported using the following syntax:
```shell
diff --git a/internal/plugin/provider.go b/internal/plugin/provider.go
index 9a64a061d..07a71fd0e 100644
--- a/internal/plugin/provider.go
+++ b/internal/plugin/provider.go
@@ -1,4 +1,4 @@
-// Package provider is the implementation of the Aiven provider.
+// Package plugin is the implementation of the Aiven provider.
package plugin
import (
@@ -14,7 +14,6 @@ import (
"github.com/aiven/terraform-provider-aiven/internal/common"
"github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg"
"github.com/aiven/terraform-provider-aiven/internal/plugin/service/organization"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/serviceintegration"
)
// AivenProvider is the provider implementation for Aiven.
@@ -111,7 +110,6 @@ func (p *AivenProvider) Configure(
func (p *AivenProvider) Resources(context.Context) []func() resource.Resource {
return []func() resource.Resource{
organization.NewOrganizationResource,
- serviceintegration.NewServiceIntegrationResource,
}
}
@@ -119,7 +117,6 @@ func (p *AivenProvider) Resources(context.Context) []func() resource.Resource {
func (p *AivenProvider) DataSources(context.Context) []func() datasource.DataSource {
return []func() datasource.DataSource{
organization.NewOrganizationDataSource,
- serviceintegration.NewServiceIntegrationDataSource,
}
}
diff --git a/internal/plugin/service/serviceintegration/models.go b/internal/plugin/service/serviceintegration/models.go
deleted file mode 100644
index f2a752a8e..000000000
--- a/internal/plugin/service/serviceintegration/models.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package serviceintegration
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
- "github.com/hashicorp/terraform-plugin-framework/types"
-)
-
-const (
- idProjectIndex = 0
- idIntegrationIDIndex = 1
-)
-
-// Plugin framework doesn't support embedded structs
-// https://github.com/hashicorp/terraform-plugin-framework/issues/242
-// We use resourceModel as base model, and copy state to/from dataSourceModel for datasource
-type resourceModel struct {
- Timeouts timeouts.Value `tfsdk:"timeouts"`
- ID types.String `tfsdk:"id" copier:"ID"`
- Project types.String `tfsdk:"project" copier:"Project"`
- IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"`
- DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"`
- DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"`
- IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"`
- SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"`
- SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"`
- ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"`
- ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"`
- DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"`
- ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"`
- KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"`
- KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"`
- KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"`
- LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"`
- MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"`
-}
-
-type dataSourceModel struct {
- ID types.String `tfsdk:"id" copier:"ID"`
- Project types.String `tfsdk:"project" copier:"Project"`
- IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"`
- DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"`
- DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"`
- IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"`
- SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"`
- SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"`
- ClickhouseKafkaUserConfig types.Set `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"`
- ClickhousePostgresqlUserConfig types.Set `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"`
- DatadogUserConfig types.Set `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"`
- ExternalAwsCloudwatchMetricsUserConfig types.Set `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"`
- KafkaConnectUserConfig types.Set `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"`
- KafkaLogsUserConfig types.Set `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"`
- KafkaMirrormakerUserConfig types.Set `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"`
- LogsUserConfig types.Set `tfsdk:"logs_user_config" copier:"LogsUserConfig"`
- MetricsUserConfig types.Set `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"`
-}
-
-func (p *resourceModel) getID() string {
- i := p.IntegrationID.ValueString()
- if i != "" {
- return i
- }
- return getIDIndex(p.ID.ValueString(), idIntegrationIDIndex)
-}
-
-func (p *resourceModel) getProject() string {
- project := p.Project.ValueString()
- if project != "" {
- return project
- }
- return getIDIndex(p.ID.ValueString(), idProjectIndex)
-}
-
-func getIDIndex(s string, i int) string {
- list := strings.Split(s, "/")
- if i < len(list) {
- return list[i]
- }
- return ""
-}
-
-func getEndpointIDPointer(s string) *string {
- id := getIDIndex(s, idIntegrationIDIndex)
- if s == "" {
- return nil
- }
- return &id
-}
-
-func getProjectPointer(s string) *string {
- id := getIDIndex(s, idProjectIndex)
- if s == "" {
- return nil
- }
- return &id
-}
-
-func newEndpointID(project string, s *string) types.String {
- if s != nil {
- v := fmt.Sprintf("%s/%s", project, *s)
- s = &v
- }
- return types.StringPointerValue(s)
-}
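Context for the deleted models.go above: the resource and data-source models carry identical fields, and state is shuttled between them with github.com/jinzhu/copier because the plugin framework does not support embedded structs (per the comment in the removed file). A minimal, self-contained sketch of that copy pattern follows; the struct and field names are invented for illustration and are not the provider's own types:

```go
package main

import (
	"fmt"

	"github.com/jinzhu/copier"
)

// Two hypothetical models with matching fields, mirroring how the removed
// resourceModel/dataSourceModel pair was kept in sync.
type resourceLikeModel struct {
	Project       string `copier:"Project"`
	IntegrationID string `copier:"IntegrationID"`
}

type dataSourceLikeModel struct {
	Project       string `copier:"Project"`
	IntegrationID string `copier:"IntegrationID"`
}

func main() {
	src := dataSourceLikeModel{Project: "my-project", IntegrationID: "abc123"}

	var dst resourceLikeModel
	// copier.Copy matches fields by name (or by the copier tag) and copies values.
	if err := copier.Copy(&dst, &src); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", dst) // {Project:my-project IntegrationID:abc123}
}
```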
diff --git a/internal/plugin/service/serviceintegration/service_integration_data_source.go b/internal/plugin/service/serviceintegration/service_integration_data_source.go
deleted file mode 100644
index 12e624d6a..000000000
--- a/internal/plugin/service/serviceintegration/service_integration_data_source.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package serviceintegration
-
-import (
- "context"
-
- "github.com/aiven/aiven-go-client/v2"
- "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
- "github.com/hashicorp/terraform-plugin-framework/datasource"
- "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- "github.com/jinzhu/copier"
-
- "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-var (
- _ datasource.DataSource = &serviceIntegrationDataSource{}
- _ datasource.DataSourceWithConfigure = &serviceIntegrationDataSource{}
-)
-
-func NewServiceIntegrationDataSource() datasource.DataSource {
- return &serviceIntegrationDataSource{}
-}
-
-type serviceIntegrationDataSource struct {
- client *aiven.Client
-}
-
-func (s *serviceIntegrationDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, _ *datasource.ConfigureResponse) {
- if req.ProviderData == nil {
- return
- }
-
- s.client = req.ProviderData.(*aiven.Client)
-}
-
-func (s *serviceIntegrationDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) {
- resp.TypeName = "aiven_service_integration"
-}
-
-func (s *serviceIntegrationDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
- resp.Schema = schema.Schema{
- Description: "The Service Integration data source provides information about the existing Aiven Service Integration.",
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Computed: true,
- Validators: []validator.String{endpointIDValidator},
- },
- "integration_id": schema.StringAttribute{
- Description: "Service Integration Id at aiven",
- Computed: true,
- },
- "destination_endpoint_id": schema.StringAttribute{
- Description: "Destination endpoint for the integration (if any)",
- Computed: true,
- Validators: []validator.String{endpointIDValidator},
- },
- "destination_service_name": schema.StringAttribute{
- Description: "Destination service for the integration (if any)",
- Required: true,
- },
- "integration_type": schema.StringAttribute{
- Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes(), ", ", "`"),
- Required: true,
- Validators: []validator.String{
- stringvalidator.OneOf(integrationTypes()...),
- },
- },
- "project": schema.StringAttribute{
- Description: "Project the integration belongs to",
- Required: true,
- },
- "source_endpoint_id": schema.StringAttribute{
- Description: "Source endpoint for the integration (if any)",
- Computed: true,
- Validators: []validator.String{endpointIDValidator},
- },
- "source_service_name": schema.StringAttribute{
- Description: "Source service for the integration (if any)",
- Required: true,
- },
- },
- Blocks: map[string]schema.Block{
- "clickhouse_kafka_user_config": clickhousekafka.NewDataSourceSchema(),
- "clickhouse_postgresql_user_config": clickhousepostgresql.NewDataSourceSchema(),
- "datadog_user_config": datadog.NewDataSourceSchema(),
- "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewDataSourceSchema(),
- "kafka_connect_user_config": kafkaconnect.NewDataSourceSchema(),
- "kafka_logs_user_config": kafkalogs.NewDataSourceSchema(),
- "kafka_mirrormaker_user_config": kafkamirrormaker.NewDataSourceSchema(),
- "logs_user_config": logs.NewDataSourceSchema(),
- "metrics_user_config": metrics.NewDataSourceSchema(),
- },
- }
-}
-
-// Read reads datasource
-// All functions adapted for resourceModel, so we use it as donor
-// Copies state from datasource to resource, then back, when things are done
-func (s *serviceIntegrationDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
- var o dataSourceModel
- resp.Diagnostics.Append(req.Config.Get(ctx, &o)...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- var res resourceModel
- err := copier.Copy(&res, &o)
- if err != nil {
- resp.Diagnostics.AddError("data config copy error", err.Error())
- }
-
- dto, err := getSIByName(ctx, s.client, &res)
- if err != nil {
- resp.Diagnostics.AddError(errmsg.SummaryErrorReadingResource, err.Error())
- return
- }
-
- loadFromDTO(ctx, &resp.Diagnostics, &res, dto)
- if resp.Diagnostics.HasError() {
- return
- }
-
- err = copier.Copy(&o, &res)
- if err != nil {
- resp.Diagnostics.AddError("dto copy error", err.Error())
- }
-
- resp.Diagnostics.Append(resp.State.Set(ctx, o)...)
-}
diff --git a/internal/plugin/service/serviceintegration/service_integration_resource.go b/internal/plugin/service/serviceintegration/service_integration_resource.go
deleted file mode 100644
index 6eb8883a9..000000000
--- a/internal/plugin/service/serviceintegration/service_integration_resource.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package serviceintegration
-
-import (
- "context"
- "fmt"
- "regexp"
-
- "github.com/aiven/aiven-go-client/v2"
- "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/hashicorp/terraform-plugin-framework/path"
- "github.com/hashicorp/terraform-plugin-framework/resource"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
- "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- "github.com/hashicorp/terraform-plugin-framework/tfsdk"
- "github.com/hashicorp/terraform-plugin-framework/types"
-
- "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/util"
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-var endpointIDValidator = stringvalidator.RegexMatches(
- regexp.MustCompile(`^[a-zA-Z0-9_-]*/[a-zA-Z0-9_-]*$`),
- "endpoint id should have the following format: project_name/endpoint_id",
-)
-
-var (
- _ resource.Resource = &serviceIntegrationResource{}
- _ resource.ResourceWithConfigure = &serviceIntegrationResource{}
- _ resource.ResourceWithImportState = &serviceIntegrationResource{}
-)
-
-func NewServiceIntegrationResource() resource.Resource {
- return &serviceIntegrationResource{}
-}
-
-type serviceIntegrationResource struct {
- client *aiven.Client
-}
-
-func (s *serviceIntegrationResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) {
- if req.ProviderData == nil {
- return
- }
-
- s.client = req.ProviderData.(*aiven.Client)
-}
-
-func (s *serviceIntegrationResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) {
- resp.TypeName = "aiven_service_integration"
-}
-
-func (s *serviceIntegrationResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
- resp.Schema = util.GeneralizeSchema(ctx, schema.Schema{
- Description: "The Service Integration resource allows the creation and management of Aiven Service Integrations.",
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Computed: true,
- Validators: []validator.String{endpointIDValidator},
- },
- "integration_id": schema.StringAttribute{
- Description: "Service Integration Id at aiven",
- Computed: true,
- },
- "destination_endpoint_id": schema.StringAttribute{
- Description: "Destination endpoint for the integration (if any)",
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Optional: true,
- Validators: []validator.String{
- endpointIDValidator,
- stringvalidator.ExactlyOneOf(
- path.MatchRoot("destination_endpoint_id"),
- path.MatchRoot("destination_service_name"),
- ),
- },
- },
- "destination_service_name": schema.StringAttribute{
- Description: "Destination service for the integration (if any)",
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Optional: true,
- },
- "integration_type": schema.StringAttribute{
- Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes(), ", ", "`"),
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Required: true,
- Validators: []validator.String{
- stringvalidator.OneOf(integrationTypes()...),
- },
- },
- "project": schema.StringAttribute{
- Description: "Project the integration belongs to",
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Required: true,
- },
- "source_endpoint_id": schema.StringAttribute{
- Description: "Source endpoint for the integration (if any)",
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Optional: true,
- Validators: []validator.String{
- endpointIDValidator,
- stringvalidator.ExactlyOneOf(
- path.MatchRoot("source_endpoint_id"),
- path.MatchRoot("source_service_name"),
- ),
- },
- },
- "source_service_name": schema.StringAttribute{
- Description: "Source service for the integration (if any)",
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Optional: true,
- },
- },
- Blocks: map[string]schema.Block{
- "clickhouse_kafka_user_config": clickhousekafka.NewResourceSchema(),
- "clickhouse_postgresql_user_config": clickhousepostgresql.NewResourceSchema(),
- "datadog_user_config": datadog.NewResourceSchema(),
- "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewResourceSchema(),
- "kafka_connect_user_config": kafkaconnect.NewResourceSchema(),
- "kafka_logs_user_config": kafkalogs.NewResourceSchema(),
- "kafka_mirrormaker_user_config": kafkamirrormaker.NewResourceSchema(),
- "logs_user_config": logs.NewResourceSchema(),
- "metrics_user_config": metrics.NewResourceSchema(),
- },
- })
-}
-
-func (s *serviceIntegrationResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
- var o resourceModel
- resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- // read_replicas can be only be created alongside the service. also the only way to promote the replica
- // is to delete the service integration that was created, so we should make it least painful to do so.
- // for now, we support to seemlessly import preexisting 'read_replica' service integrations in the resource create
- // all other integrations should be imported using `terraform import`
- if o.IntegrationType.ValueString() == readReplicaType {
- if preexisting, err := getSIByName(ctx, s.client, &o); err != nil {
- resp.Diagnostics.AddError("Unable to search for possible preexisting 'read_replica' service integration", err.Error())
- return
- } else if preexisting != nil {
- o.IntegrationID = types.StringValue(preexisting.ServiceIntegrationID)
- s.read(ctx, &resp.Diagnostics, &resp.State, &o)
- return
- }
- }
-
- userConfig := expandUserConfig(ctx, &resp.Diagnostics, &o, true)
- if resp.Diagnostics.HasError() {
- return
- }
-
- createReq := aiven.CreateServiceIntegrationRequest{
- DestinationProject: getProjectPointer(o.DestinationEndpointID.ValueString()),
- DestinationEndpointID: getEndpointIDPointer(o.DestinationEndpointID.ValueString()),
- DestinationService: o.DestinationServiceName.ValueStringPointer(),
- IntegrationType: o.IntegrationType.ValueString(),
- SourceProject: getProjectPointer(o.SourceEndpointID.ValueString()),
- SourceEndpointID: getEndpointIDPointer(o.SourceEndpointID.ValueString()),
- SourceService: o.SourceServiceName.ValueStringPointer(),
- UserConfig: userConfig,
- }
-
- dto, err := s.client.ServiceIntegrations.Create(ctx, o.Project.ValueString(), createReq)
- if err != nil {
- resp.Diagnostics.AddError(errmsg.SummaryErrorCreatingResource, err.Error())
- return
- }
-
- o.IntegrationID = types.StringValue(dto.ServiceIntegrationID)
- s.read(ctx, &resp.Diagnostics, &resp.State, &o)
-}
-
-func (s *serviceIntegrationResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
- var o resourceModel
- resp.Diagnostics.Append(req.State.Get(ctx, &o)...)
- if resp.Diagnostics.HasError() {
- return
- }
- s.read(ctx, &resp.Diagnostics, &resp.State, &o)
-}
-
-func (s *serviceIntegrationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
- var o resourceModel
- resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- // We read state to get integration's ID
- var state resourceModel
- resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- // Copies ID from the state
- o.IntegrationID = state.IntegrationID
- userConfig := expandUserConfig(ctx, &resp.Diagnostics, &o, false)
- if resp.Diagnostics.HasError() {
- return
- }
-
- _, err := s.client.ServiceIntegrations.Update(
- ctx,
- state.Project.ValueString(),
- state.IntegrationID.ValueString(),
- aiven.UpdateServiceIntegrationRequest{
- UserConfig: userConfig,
- },
- )
-
- if err != nil {
- resp.Diagnostics.AddError(errmsg.SummaryErrorUpdatingResource, err.Error())
- return
- }
-
- s.read(ctx, &resp.Diagnostics, &resp.State, &o)
-}
-
-func (s *serviceIntegrationResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
- var o resourceModel
- resp.Diagnostics.Append(req.State.Get(ctx, &o)...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- err := s.client.ServiceIntegrations.Delete(ctx, o.Project.ValueString(), o.IntegrationID.ValueString())
- if err != nil {
- resp.Diagnostics.AddError(errmsg.SummaryErrorDeletingResource, err.Error())
- }
-}
-
-func (s *serviceIntegrationResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
- resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
-}
-
-// read reads from API and saves to state
-func (s *serviceIntegrationResource) read(ctx context.Context, diags *diag.Diagnostics, state *tfsdk.State, o *resourceModel) {
- dto, err := getSIByID(ctx, s.client, o)
- if err != nil {
- diags.AddError(errmsg.SummaryErrorReadingResource, err.Error())
- return
- }
-
- loadFromDTO(ctx, diags, o, dto)
- if diags.HasError() {
- return
- }
- diags.Append(state.Set(ctx, o)...)
-}
-
-// getSIByID gets ServiceIntegration by ID
-func getSIByID(ctx context.Context, client *aiven.Client, o *resourceModel) (dto *aiven.ServiceIntegration, err error) {
- id := o.getID()
- project := o.getProject()
- if len(id)*len(project) == 0 {
- return nil, fmt.Errorf("no ID or project provided")
- }
-
- return dto, util.WaitActive(ctx, func() error {
- dto, err = client.ServiceIntegrations.Get(ctx, project, id)
- if err != nil {
- return err
- }
- if !dto.Active {
- return fmt.Errorf("service integration is not active")
- }
- return nil
- })
-}
-
-// getSIByName gets ServiceIntegration by name
-func getSIByName(ctx context.Context, client *aiven.Client, o *resourceModel) (*aiven.ServiceIntegration, error) {
- project := o.Project.ValueString()
- integrationType := o.IntegrationType.ValueString()
- sourceServiceName := o.SourceServiceName.ValueString()
- destinationServiceName := o.DestinationServiceName.ValueString()
-
- integrations, err := client.ServiceIntegrations.List(ctx, project, sourceServiceName)
- if err != nil && !aiven.IsNotFound(err) {
- return nil, fmt.Errorf("unable to get list of service integrations: %s", err)
- }
-
- for _, i := range integrations {
- if i.SourceService == nil || i.DestinationService == nil || i.ServiceIntegrationID == "" {
- continue
- }
-
- if i.IntegrationType == integrationType &&
- *i.SourceService == sourceServiceName &&
- *i.DestinationService == destinationServiceName {
- return i, nil
- }
- }
-
- return nil, nil
-}
-
-// loadFromDTO loads API values to terraform object
-func loadFromDTO(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) {
- flattenUserConfig(ctx, diags, o, dto)
- if diags.HasError() {
- return
- }
-
- id := o.getID()
- project := o.getProject()
- o.ID = newEndpointID(project, &id)
- o.DestinationEndpointID = newEndpointID(project, dto.DestinationEndpointID)
- o.DestinationServiceName = types.StringPointerValue(dto.DestinationService)
- o.IntegrationType = types.StringValue(dto.IntegrationType)
- o.SourceEndpointID = newEndpointID(project, dto.SourceEndpointID)
- o.SourceServiceName = types.StringPointerValue(dto.SourceService)
-}
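The deleted resource's read path (`read` → `getSIByID`) does not return the integration immediately; it wraps the API call in `util.WaitActive` and keeps retrying until the integration reports `Active`. Below is a minimal, self-contained sketch of that wait-until-active pattern; the 2-second poll interval and the context-deadline cancellation are assumptions, not the provider's actual retry policy.

```go
// Minimal sketch of a wait-until-active poll loop, similar in spirit to the
// util.WaitActive call used by getSIByID. Interval and timeout are assumed.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitActive retries check until it succeeds or the context is cancelled.
func waitActive(ctx context.Context, check func() error) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	for {
		err := check()
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("gave up waiting: %w (last error: %v)", ctx.Err(), err)
		case <-ticker.C:
			// try again on the next tick
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	attempts := 0
	err := waitActive(ctx, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("service integration is not active")
		}
		return nil
	})
	fmt.Println(err, "after", attempts, "attempts")
}
```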
diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go
deleted file mode 100644
index 70f3bced8..000000000
--- a/internal/plugin/service/serviceintegration/userconfig.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package serviceintegration
-
-import (
- "context"
-
- "github.com/aiven/aiven-go-client/v2"
- "github.com/hashicorp/terraform-plugin-framework/diag"
-
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs"
- "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const (
- clickhouseKafkaType = "clickhouse_kafka"
- clickhousePostgresqlType = "clickhouse_postgresql"
- datadogType = "datadog"
- externalAwsCloudwatchMetricsType = "external_aws_cloudwatch_metrics"
- kafkaConnectType = "kafka_connect"
- kafkaLogsType = "kafka_logs"
- kafkaMirrormakerType = "kafka_mirrormaker"
- logsType = "logs"
- metricsType = "metrics"
- readReplicaType = "read_replica"
-)
-
-func integrationTypes() []string {
- return []string{
- "alertmanager",
- "cassandra_cross_service_cluster",
- clickhouseKafkaType,
- clickhousePostgresqlType,
- "dashboard",
- datadogType,
- "datasource",
- "external_aws_cloudwatch_logs",
- externalAwsCloudwatchMetricsType,
- "external_elasticsearch_logs",
- "external_google_cloud_logging",
- "external_opensearch_logs",
- "flink",
- "internal_connectivity",
- "jolokia",
- kafkaConnectType,
- kafkaLogsType,
- kafkaMirrormakerType,
- logsType,
- "m3aggregator",
- "m3coordinator",
- metricsType,
- "opensearch_cross_cluster_replication",
- "opensearch_cross_cluster_search",
- "prometheus",
- readReplicaType,
- "rsyslog",
- "schema_registry_proxy",
- }
-}
-
-// flattenUserConfig from aiven to terraform
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) {
- if dto.UserConfig == nil {
- return
- }
-
- // We set user config from Aiven only if it's been set in TF
- // Otherwise it will produce invalid "after apply"
- switch {
- case schemautil.HasValue(o.ClickhouseKafkaUserConfig):
- o.ClickhouseKafkaUserConfig = clickhousekafka.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.ClickhousePostgresqlUserConfig):
- o.ClickhousePostgresqlUserConfig = clickhousepostgresql.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.DatadogUserConfig):
- o.DatadogUserConfig = datadog.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.ExternalAwsCloudwatchMetricsUserConfig):
- o.ExternalAwsCloudwatchMetricsUserConfig = externalawscloudwatchmetrics.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.KafkaConnectUserConfig):
- o.KafkaConnectUserConfig = kafkaconnect.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.KafkaLogsUserConfig):
- o.KafkaLogsUserConfig = kafkalogs.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.KafkaMirrormakerUserConfig):
- o.KafkaMirrormakerUserConfig = kafkamirrormaker.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.LogsUserConfig):
- o.LogsUserConfig = logs.Flatten(ctx, diags, dto.UserConfig)
- case schemautil.HasValue(o.MetricsUserConfig):
- o.MetricsUserConfig = metrics.Flatten(ctx, diags, dto.UserConfig)
- }
-}
-
-// expandUserConfig from terraform to aiven
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, create bool) map[string]any {
- var config any
-
- // If an invalid integration type is set
- // This will send wrong config to Aiven
- // Which is sort of a validation too
- switch {
- case schemautil.HasValue(o.ClickhouseKafkaUserConfig):
- config = clickhousekafka.Expand(ctx, diags, o.ClickhouseKafkaUserConfig)
- case schemautil.HasValue(o.ClickhousePostgresqlUserConfig):
- config = clickhousepostgresql.Expand(ctx, diags, o.ClickhousePostgresqlUserConfig)
- case schemautil.HasValue(o.DatadogUserConfig):
- config = datadog.Expand(ctx, diags, o.DatadogUserConfig)
- case schemautil.HasValue(o.ExternalAwsCloudwatchMetricsUserConfig):
- config = externalawscloudwatchmetrics.Expand(ctx, diags, o.ExternalAwsCloudwatchMetricsUserConfig)
- case schemautil.HasValue(o.KafkaConnectUserConfig):
- config = kafkaconnect.Expand(ctx, diags, o.KafkaConnectUserConfig)
- case schemautil.HasValue(o.KafkaLogsUserConfig):
- config = kafkalogs.Expand(ctx, diags, o.KafkaLogsUserConfig)
- case schemautil.HasValue(o.KafkaMirrormakerUserConfig):
- config = kafkamirrormaker.Expand(ctx, diags, o.KafkaMirrormakerUserConfig)
- case schemautil.HasValue(o.LogsUserConfig):
- config = logs.Expand(ctx, diags, o.LogsUserConfig)
- case schemautil.HasValue(o.MetricsUserConfig):
- config = metrics.Expand(ctx, diags, o.MetricsUserConfig)
- }
-
- if diags.HasError() {
- return nil
- }
-
- dict, err := schemautil.MarshalUserConfig(config, create)
- if err != nil {
- diags.AddError("Failed to expand user config", err.Error())
- return nil
- }
- return dict
-}
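`flattenUserConfig` and `expandUserConfig` above implement a one-of dispatch: a service integration carries at most one `*_user_config` block, and the first block that `schemautil.HasValue` reports as set decides which generated package's `Flatten`/`Expand` is called. The sketch below reproduces that selection logic with plain Go types; the `model` and `pickConfig` names are illustrative only, not part of the provider.

```go
// Simplified one-of dispatch: the first configured block wins and is turned
// into the request payload, mirroring the switch in expandUserConfig.
package main

import "fmt"

type datadogConfig struct{ MaxJmxMetrics int }
type metricsConfig struct{ RetentionDays int }

type model struct {
	Datadog *datadogConfig
	Metrics *metricsConfig
}

// pickConfig returns the integration type name and its config block.
func pickConfig(m model) (string, any) {
	switch {
	case m.Datadog != nil:
		return "datadog", m.Datadog
	case m.Metrics != nil:
		return "metrics", m.Metrics
	default:
		return "", nil
	}
}

func main() {
	kind, cfg := pickConfig(model{Datadog: &datadogConfig{MaxJmxMetrics: 10}})
	fmt.Println(kind, cfg) // datadog &{10}
}
```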
diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go
deleted file mode 100644
index b22a9ef20..000000000
--- a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go
+++ /dev/null
@@ -1,408 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package clickhousekafka
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
- stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.SetNestedBlock{
- Description: "Tables to create",
- NestedObject: resource.NestedBlockObject{
- Attributes: map[string]resource.Attribute{
- "auto_offset_reset": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("earliest"),
- Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.",
- Optional: true,
- },
- "data_format": resource.StringAttribute{
- Description: "Message data format. The default value is `JSONEachRow`.",
- Required: true,
- },
- "date_time_input_format": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("basic"),
- Description: "Method to read DateTime from text input formats. The default value is `basic`.",
- Optional: true,
- },
- "group_name": resource.StringAttribute{
- Description: "Kafka consumers group. The default value is `clickhouse`.",
- Required: true,
- },
- "handle_error_mode": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("default"),
- Description: "How to handle errors for Kafka engine. The default value is `default`.",
- Optional: true,
- },
- "max_block_size": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(0),
- Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.",
- Optional: true,
- },
- "max_rows_per_message": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(1),
- Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.",
- Optional: true,
- },
- "name": resource.StringAttribute{
- Description: "Name of the table.",
- Required: true,
- },
- "num_consumers": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(1),
- Description: "The number of consumers per table per replica. The default value is `1`.",
- Optional: true,
- },
- "poll_max_batch_size": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(0),
- Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.",
- Optional: true,
- },
- "skip_broken_messages": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(0),
- Description: "Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.",
- Optional: true,
- },
- },
- Blocks: map[string]resource.Block{
- "columns": resource.SetNestedBlock{
- Description: "Table columns",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "name": resource.StringAttribute{
- Description: "Column name.",
- Required: true,
- },
- "type": resource.StringAttribute{
- Description: "Column type.",
- Required: true,
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- },
- "topics": resource.SetNestedBlock{
- Description: "Kafka topics",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"name": resource.StringAttribute{
- Description: "Name of the topic.",
- Required: true,
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- },
- },
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.SetNestedBlock{
- Description: "Tables to create",
- NestedObject: datasource.NestedBlockObject{
- Attributes: map[string]datasource.Attribute{
- "auto_offset_reset": datasource.StringAttribute{
- Computed: true,
- Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.",
- },
- "data_format": datasource.StringAttribute{
- Computed: true,
- Description: "Message data format. The default value is `JSONEachRow`.",
- },
- "date_time_input_format": datasource.StringAttribute{
- Computed: true,
- Description: "Method to read DateTime from text input formats. The default value is `basic`.",
- },
- "group_name": datasource.StringAttribute{
- Computed: true,
- Description: "Kafka consumers group. The default value is `clickhouse`.",
- },
- "handle_error_mode": datasource.StringAttribute{
- Computed: true,
- Description: "How to handle errors for Kafka engine. The default value is `default`.",
- },
- "max_block_size": datasource.Int64Attribute{
- Computed: true,
- Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.",
- },
- "max_rows_per_message": datasource.Int64Attribute{
- Computed: true,
- Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.",
- },
- "name": datasource.StringAttribute{
- Computed: true,
- Description: "Name of the table.",
- },
- "num_consumers": datasource.Int64Attribute{
- Computed: true,
- Description: "The number of consumers per table per replica. The default value is `1`.",
- },
- "poll_max_batch_size": datasource.Int64Attribute{
- Computed: true,
- Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.",
- },
- "skip_broken_messages": datasource.Int64Attribute{
- Computed: true,
- Description: "Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.",
- },
- },
- Blocks: map[string]datasource.Block{
- "columns": datasource.SetNestedBlock{
- Description: "Table columns",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "name": datasource.StringAttribute{
- Computed: true,
- Description: "Column name.",
- },
- "type": datasource.StringAttribute{
- Computed: true,
- Description: "Column type.",
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- },
- "topics": datasource.SetNestedBlock{
- Description: "Kafka topics",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"name": datasource.StringAttribute{
- Computed: true,
- Description: "Name of the topic.",
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- },
- },
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(100)},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig Integration user config
-type tfoUserConfig struct {
- Tables types.Set `tfsdk:"tables"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- Tables []*dtoTables `groups:"create,update" json:"tables,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- tablesVar := schemautil.ExpandSetNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{Tables: tablesVar}
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- tablesVar := schemautil.FlattenSetNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{Tables: tablesVar}
-}
-
-var userConfigAttrs = map[string]attr.Type{"tables": types.SetType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}}
-
-// tfoTables Table to create
-type tfoTables struct {
- AutoOffsetReset types.String `tfsdk:"auto_offset_reset"`
- Columns types.Set `tfsdk:"columns"`
- DataFormat types.String `tfsdk:"data_format"`
- DateTimeInputFormat types.String `tfsdk:"date_time_input_format"`
- GroupName types.String `tfsdk:"group_name"`
- HandleErrorMode types.String `tfsdk:"handle_error_mode"`
- MaxBlockSize types.Int64 `tfsdk:"max_block_size"`
- MaxRowsPerMessage types.Int64 `tfsdk:"max_rows_per_message"`
- Name types.String `tfsdk:"name"`
- NumConsumers types.Int64 `tfsdk:"num_consumers"`
- PollMaxBatchSize types.Int64 `tfsdk:"poll_max_batch_size"`
- SkipBrokenMessages types.Int64 `tfsdk:"skip_broken_messages"`
- Topics types.Set `tfsdk:"topics"`
-}
-
-// dtoTables request/response object
-type dtoTables struct {
- AutoOffsetReset *string `groups:"create,update" json:"auto_offset_reset,omitempty"`
- Columns []*dtoColumns `groups:"create,update" json:"columns"`
- DataFormat string `groups:"create,update" json:"data_format"`
- DateTimeInputFormat *string `groups:"create,update" json:"date_time_input_format,omitempty"`
- GroupName string `groups:"create,update" json:"group_name"`
- HandleErrorMode *string `groups:"create,update" json:"handle_error_mode,omitempty"`
- MaxBlockSize *int64 `groups:"create,update" json:"max_block_size,omitempty"`
- MaxRowsPerMessage *int64 `groups:"create,update" json:"max_rows_per_message,omitempty"`
- Name string `groups:"create,update" json:"name"`
- NumConsumers *int64 `groups:"create,update" json:"num_consumers,omitempty"`
- PollMaxBatchSize *int64 `groups:"create,update" json:"poll_max_batch_size,omitempty"`
- SkipBrokenMessages *int64 `groups:"create,update" json:"skip_broken_messages,omitempty"`
- Topics []*dtoTopics `groups:"create,update" json:"topics"`
-}
-
-// expandTables expands tf object into dto object
-func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *dtoTables {
- columnsVar := schemautil.ExpandSetNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns)
- if diags.HasError() {
- return nil
- }
- topicsVar := schemautil.ExpandSetNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics)
- if diags.HasError() {
- return nil
- }
- return &dtoTables{
- AutoOffsetReset: schemautil.ValueStringPointer(o.AutoOffsetReset),
- Columns: columnsVar,
- DataFormat: o.DataFormat.ValueString(),
- DateTimeInputFormat: schemautil.ValueStringPointer(o.DateTimeInputFormat),
- GroupName: o.GroupName.ValueString(),
- HandleErrorMode: schemautil.ValueStringPointer(o.HandleErrorMode),
- MaxBlockSize: schemautil.ValueInt64Pointer(o.MaxBlockSize),
- MaxRowsPerMessage: schemautil.ValueInt64Pointer(o.MaxRowsPerMessage),
- Name: o.Name.ValueString(),
- NumConsumers: schemautil.ValueInt64Pointer(o.NumConsumers),
- PollMaxBatchSize: schemautil.ValueInt64Pointer(o.PollMaxBatchSize),
- SkipBrokenMessages: schemautil.ValueInt64Pointer(o.SkipBrokenMessages),
- Topics: topicsVar,
- }
-}
-
-// flattenTables flattens dto object into tf object
-func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) *tfoTables {
- columnsVar := schemautil.FlattenSetNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns)
- if diags.HasError() {
- return nil
- }
- topicsVar := schemautil.FlattenSetNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics)
- if diags.HasError() {
- return nil
- }
- return &tfoTables{
- AutoOffsetReset: types.StringPointerValue(o.AutoOffsetReset),
- Columns: columnsVar,
- DataFormat: types.StringValue(o.DataFormat),
- DateTimeInputFormat: types.StringPointerValue(o.DateTimeInputFormat),
- GroupName: types.StringValue(o.GroupName),
- HandleErrorMode: types.StringPointerValue(o.HandleErrorMode),
- MaxBlockSize: types.Int64PointerValue(o.MaxBlockSize),
- MaxRowsPerMessage: types.Int64PointerValue(o.MaxRowsPerMessage),
- Name: types.StringValue(o.Name),
- NumConsumers: types.Int64PointerValue(o.NumConsumers),
- PollMaxBatchSize: types.Int64PointerValue(o.PollMaxBatchSize),
- SkipBrokenMessages: types.Int64PointerValue(o.SkipBrokenMessages),
- Topics: topicsVar,
- }
-}
-
-var tablesAttrs = map[string]attr.Type{
- "auto_offset_reset": types.StringType,
- "columns": types.SetType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}},
- "data_format": types.StringType,
- "date_time_input_format": types.StringType,
- "group_name": types.StringType,
- "handle_error_mode": types.StringType,
- "max_block_size": types.Int64Type,
- "max_rows_per_message": types.Int64Type,
- "name": types.StringType,
- "num_consumers": types.Int64Type,
- "poll_max_batch_size": types.Int64Type,
- "skip_broken_messages": types.Int64Type,
- "topics": types.SetType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}},
-}
-
-// tfoColumns Table column
-type tfoColumns struct {
- Name types.String `tfsdk:"name"`
- Type types.String `tfsdk:"type"`
-}
-
-// dtoColumns request/response object
-type dtoColumns struct {
- Name string `groups:"create,update" json:"name"`
- Type string `groups:"create,update" json:"type"`
-}
-
-// expandColumns expands tf object into dto object
-func expandColumns(ctx context.Context, diags *diag.Diagnostics, o *tfoColumns) *dtoColumns {
- return &dtoColumns{
- Name: o.Name.ValueString(),
- Type: o.Type.ValueString(),
- }
-}
-
-// flattenColumns flattens dto object into tf object
-func flattenColumns(ctx context.Context, diags *diag.Diagnostics, o *dtoColumns) *tfoColumns {
- return &tfoColumns{
- Name: types.StringValue(o.Name),
- Type: types.StringValue(o.Type),
- }
-}
-
-var columnsAttrs = map[string]attr.Type{
- "name": types.StringType,
- "type": types.StringType,
-}
-
-// tfoTopics Kafka topic
-type tfoTopics struct {
- Name types.String `tfsdk:"name"`
-}
-
-// dtoTopics request/response object
-type dtoTopics struct {
- Name string `groups:"create,update" json:"name"`
-}
-
-// expandTopics expands tf object into dto object
-func expandTopics(ctx context.Context, diags *diag.Diagnostics, o *tfoTopics) *dtoTopics {
- return &dtoTopics{Name: o.Name.ValueString()}
-}
-
-// flattenTopics flattens dto object into tf object
-func flattenTopics(ctx context.Context, diags *diag.Diagnostics, o *dtoTopics) *tfoTopics {
- return &tfoTopics{Name: types.StringValue(o.Name)}
-}
-
-var topicsAttrs = map[string]attr.Type{"name": types.StringType}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
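The public `Flatten` above first converts the API's `map[string]any` user config into the typed dto via `schemautil.MapToDTO`. A JSON round trip is one straightforward way to implement that kind of conversion; whether `MapToDTO` works exactly like this is an assumption, so the helper below is only a sketch and `dtoTopic` is an illustrative type.

```go
// Sketch of a map[string]any -> typed struct conversion via a JSON round trip.
package main

import (
	"encoding/json"
	"fmt"
)

type dtoTopic struct {
	Name string `json:"name"`
}

// mapToDTO copies a generic map into a typed struct through JSON.
func mapToDTO(m map[string]any, out any) error {
	b, err := json.Marshal(m)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, out)
}

func main() {
	apiResponse := map[string]any{"name": "orders"}

	var topic dtoTopic
	if err := mapToDTO(apiResponse, &topic); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("%+v\n", topic) // {Name:orders}
}
```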
diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go
deleted file mode 100644
index 0f1a9a6ee..000000000
--- a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package clickhousekafka
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "tables": [
- {
- "auto_offset_reset": "foo",
- "columns": [
- {
- "name": "foo",
- "type": "foo"
- }
- ],
- "data_format": "foo",
- "date_time_input_format": "foo",
- "group_name": "foo",
- "handle_error_mode": "foo",
- "max_block_size": 1,
- "max_rows_per_message": 1,
- "name": "foo",
- "num_consumers": 1,
- "poll_max_batch_size": 1,
- "skip_broken_messages": 1,
- "topics": [
- {
- "name": "foo"
- }
- ]
- }
- ]
-}`
-const updateOnlyFields = `{
- "tables": [
- {
- "auto_offset_reset": "foo",
- "columns": [
- {
- "name": "foo",
- "type": "foo"
- }
- ],
- "data_format": "foo",
- "date_time_input_format": "foo",
- "group_name": "foo",
- "handle_error_mode": "foo",
- "max_block_size": 1,
- "max_rows_per_message": 1,
- "name": "foo",
- "num_consumers": 1,
- "poll_max_batch_size": 1,
- "skip_broken_messages": 1,
- "topics": [
- {
- "name": "foo"
- }
- ]
- }
- ]
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
- // Compares that output is strictly equal to the input
- // If so, the flow is valid
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
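In the test above `allFields` and `updateOnlyFields` are identical because every generated field carries `groups:"create,update"`, so `schemautil.MarshalUserConfig` keeps the same field set for both operations. The reflection-based filter below only illustrates that struct-tag mechanism; it is not the real `MarshalUserConfig`, and the `exampleConfig` type is made up for the demonstration.

```go
// Illustrative "groups" tag filter: keep only struct fields whose groups tag
// contains the requested group, keyed by their json names.
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

func filterByGroup(v any, group string) map[string]any {
	out := map[string]any{}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		for _, g := range strings.Split(f.Tag.Get("groups"), ",") {
			if g == group {
				name := strings.Split(f.Tag.Get("json"), ",")[0]
				out[name] = rv.Field(i).Interface()
				break
			}
		}
	}
	return out
}

type exampleConfig struct {
	Name       string `groups:"create" json:"name"`
	MaxRetries int    `groups:"create,update" json:"max_retries"`
}

func main() {
	cfg := exampleConfig{Name: "foo", MaxRetries: 3}
	createJSON, _ := json.Marshal(filterByGroup(cfg, "create"))
	updateJSON, _ := json.Marshal(filterByGroup(cfg, "update"))
	fmt.Println(string(createJSON)) // {"max_retries":3,"name":"foo"}
	fmt.Println(string(updateJSON)) // {"max_retries":3}
}
```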
diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go
deleted file mode 100644
index e67ab4a68..000000000
--- a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package clickhousepostgresql
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.SetNestedBlock{
- Description: "Databases to expose",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "database": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("defaultdb"),
- Description: "PostgreSQL database to expose. The default value is `defaultdb`.",
- Optional: true,
- },
- "schema": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("public"),
- Description: "PostgreSQL schema to expose. The default value is `public`.",
- Optional: true,
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(10)},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.SetNestedBlock{
- Description: "Databases to expose",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "database": datasource.StringAttribute{
- Computed: true,
- Description: "PostgreSQL database to expose. The default value is `defaultdb`.",
- },
- "schema": datasource.StringAttribute{
- Computed: true,
- Description: "PostgreSQL schema to expose. The default value is `public`.",
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(10)},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig Integration user config
-type tfoUserConfig struct {
- Databases types.Set `tfsdk:"databases"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- Databases []*dtoDatabases `groups:"create,update" json:"databases,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- databasesVar := schemautil.ExpandSetNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{Databases: databasesVar}
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- databasesVar := schemautil.FlattenSetNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{Databases: databasesVar}
-}
-
-var userConfigAttrs = map[string]attr.Type{"databases": types.SetType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}}
-
-// tfoDatabases Database to expose
-type tfoDatabases struct {
- Database types.String `tfsdk:"database"`
- Schema types.String `tfsdk:"schema"`
-}
-
-// dtoDatabases request/response object
-type dtoDatabases struct {
- Database *string `groups:"create,update" json:"database,omitempty"`
- Schema *string `groups:"create,update" json:"schema,omitempty"`
-}
-
-// expandDatabases expands tf object into dto object
-func expandDatabases(ctx context.Context, diags *diag.Diagnostics, o *tfoDatabases) *dtoDatabases {
- return &dtoDatabases{
- Database: schemautil.ValueStringPointer(o.Database),
- Schema: schemautil.ValueStringPointer(o.Schema),
- }
-}
-
-// flattenDatabases flattens dto object into tf object
-func flattenDatabases(ctx context.Context, diags *diag.Diagnostics, o *dtoDatabases) *tfoDatabases {
- return &tfoDatabases{
- Database: types.StringPointerValue(o.Database),
- Schema: types.StringPointerValue(o.Schema),
- }
-}
-
-var databasesAttrs = map[string]attr.Type{
- "database": types.StringType,
- "schema": types.StringType,
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
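As in `dtoDatabases` above, optional settings are generated as pointer fields with `omitempty`, so an unset value is left out of the request instead of being sent as a zero value. The standalone example below shows the difference; the `payload` struct is illustrative, not one of the provider's types.

```go
// Pointer + omitempty: nil pointers disappear from the JSON payload.
package main

import (
	"encoding/json"
	"fmt"
)

type payload struct {
	Database *string `json:"database,omitempty"`
	Schema   string  `json:"schema"`
}

func main() {
	db := "defaultdb"

	withValue, _ := json.Marshal(payload{Database: &db, Schema: "public"})
	withoutValue, _ := json.Marshal(payload{Schema: "public"})

	fmt.Println(string(withValue))    // {"database":"defaultdb","schema":"public"}
	fmt.Println(string(withoutValue)) // {"schema":"public"}
}
```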
diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go
deleted file mode 100644
index 330c56bc1..000000000
--- a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package clickhousepostgresql
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "databases": [
- {
- "database": "foo",
- "schema": "foo"
- }
- ]
-}`
-const updateOnlyFields = `{
- "databases": [
- {
- "database": "foo",
- "schema": "foo"
- }
- ]
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
- // Compares that output is strictly equal to the input
- // If so, the flow is valid
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog.go b/internal/plugin/service/userconfig/integration/datadog/datadog.go
deleted file mode 100644
index 0cde9d5f6..000000000
--- a/internal/plugin/service/userconfig/integration/datadog/datadog.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package datadog
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- booldefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- NestedObject: resource.NestedBlockObject{
- Attributes: map[string]resource.Attribute{
- "datadog_dbm_enabled": resource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Database Monitoring.",
- Optional: true,
- },
- "exclude_consumer_groups": resource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "exclude_topics": resource.SetAttribute{
- Computed: true,
- Description: "List of topics to exclude.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "include_consumer_groups": resource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "include_topics": resource.SetAttribute{
- Computed: true,
- Description: "List of topics to include.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "kafka_custom_metrics": resource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "max_jmx_metrics": resource.Int64Attribute{
- Computed: true,
- Description: "Maximum number of JMX metrics to send.",
- Optional: true,
- },
- },
- Blocks: map[string]resource.Block{
- "datadog_tags": resource.SetNestedBlock{
- Description: "Custom tags provided by user",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "comment": resource.StringAttribute{
- Computed: true,
- Description: "Optional tag explanation.",
- Optional: true,
- },
- "tag": resource.StringAttribute{
- Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.",
- Required: true,
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(32)},
- },
- "opensearch": resource.SetNestedBlock{
- Description: "Datadog Opensearch Options",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "index_stats_enabled": resource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Index Monitoring.",
- Optional: true,
- },
- "pending_task_stats_enabled": resource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Pending Task Monitoring.",
- Optional: true,
- },
- "pshard_stats_enabled": resource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Primary Shard Monitoring.",
- Optional: true,
- },
- }},
- },
- "redis": resource.SetNestedBlock{
- Description: "Datadog Redis Options",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"command_stats_enabled": resource.BoolAttribute{
- Computed: true,
- Default: booldefault.StaticBool(false),
- Description: "Enable command_stats option in the agent's configuration. The default value is `false`.",
- Optional: true,
- }}},
- },
- },
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- NestedObject: datasource.NestedBlockObject{
- Attributes: map[string]datasource.Attribute{
- "datadog_dbm_enabled": datasource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Database Monitoring.",
- },
- "exclude_consumer_groups": datasource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "exclude_topics": datasource.SetAttribute{
- Computed: true,
- Description: "List of topics to exclude.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "include_consumer_groups": datasource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "include_topics": datasource.SetAttribute{
- Computed: true,
- Description: "List of topics to include.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "kafka_custom_metrics": datasource.SetAttribute{
- Computed: true,
- Description: "List of custom metrics.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "max_jmx_metrics": datasource.Int64Attribute{
- Computed: true,
- Description: "Maximum number of JMX metrics to send.",
- },
- },
- Blocks: map[string]datasource.Block{
- "datadog_tags": datasource.SetNestedBlock{
- Description: "Custom tags provided by user",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "comment": datasource.StringAttribute{
- Computed: true,
- Description: "Optional tag explanation.",
- },
- "tag": datasource.StringAttribute{
- Computed: true,
- Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.",
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(32)},
- },
- "opensearch": datasource.SetNestedBlock{
- Description: "Datadog Opensearch Options",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "index_stats_enabled": datasource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Index Monitoring.",
- },
- "pending_task_stats_enabled": datasource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Pending Task Monitoring.",
- },
- "pshard_stats_enabled": datasource.BoolAttribute{
- Computed: true,
- Description: "Enable Datadog Opensearch Primary Shard Monitoring.",
- },
- }},
- },
- "redis": datasource.SetNestedBlock{
- Description: "Datadog Redis Options",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"command_stats_enabled": datasource.BoolAttribute{
- Computed: true,
- Description: "Enable command_stats option in the agent's configuration. The default value is `false`.",
- }}},
- },
- },
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig
-type tfoUserConfig struct {
- DatadogDbmEnabled types.Bool `tfsdk:"datadog_dbm_enabled"`
- DatadogTags types.Set `tfsdk:"datadog_tags"`
- ExcludeConsumerGroups types.Set `tfsdk:"exclude_consumer_groups"`
- ExcludeTopics types.Set `tfsdk:"exclude_topics"`
- IncludeConsumerGroups types.Set `tfsdk:"include_consumer_groups"`
- IncludeTopics types.Set `tfsdk:"include_topics"`
- KafkaCustomMetrics types.Set `tfsdk:"kafka_custom_metrics"`
- MaxJmxMetrics types.Int64 `tfsdk:"max_jmx_metrics"`
- Opensearch types.Set `tfsdk:"opensearch"`
- Redis types.Set `tfsdk:"redis"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- DatadogDbmEnabled *bool `groups:"create,update" json:"datadog_dbm_enabled,omitempty"`
- DatadogTags []*dtoDatadogTags `groups:"create,update" json:"datadog_tags,omitempty"`
- ExcludeConsumerGroups []string `groups:"create,update" json:"exclude_consumer_groups,omitempty"`
- ExcludeTopics []string `groups:"create,update" json:"exclude_topics,omitempty"`
- IncludeConsumerGroups []string `groups:"create,update" json:"include_consumer_groups,omitempty"`
- IncludeTopics []string `groups:"create,update" json:"include_topics,omitempty"`
- KafkaCustomMetrics []string `groups:"create,update" json:"kafka_custom_metrics,omitempty"`
- MaxJmxMetrics *int64 `groups:"create,update" json:"max_jmx_metrics,omitempty"`
- Opensearch *dtoOpensearch `groups:"create,update" json:"opensearch,omitempty"`
- Redis *dtoRedis `groups:"create,update" json:"redis,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- datadogTagsVar := schemautil.ExpandSetNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags)
- if diags.HasError() {
- return nil
- }
- excludeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeConsumerGroups)
- if diags.HasError() {
- return nil
- }
- excludeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.ExcludeTopics)
- if diags.HasError() {
- return nil
- }
- includeConsumerGroupsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeConsumerGroups)
- if diags.HasError() {
- return nil
- }
- includeTopicsVar := schemautil.ExpandSet[string](ctx, diags, o.IncludeTopics)
- if diags.HasError() {
- return nil
- }
- kafkaCustomMetricsVar := schemautil.ExpandSet[string](ctx, diags, o.KafkaCustomMetrics)
- if diags.HasError() {
- return nil
- }
- opensearchVar := schemautil.ExpandSetBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch)
- if diags.HasError() {
- return nil
- }
- redisVar := schemautil.ExpandSetBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- DatadogDbmEnabled: schemautil.ValueBoolPointer(o.DatadogDbmEnabled),
- DatadogTags: datadogTagsVar,
- ExcludeConsumerGroups: excludeConsumerGroupsVar,
- ExcludeTopics: excludeTopicsVar,
- IncludeConsumerGroups: includeConsumerGroupsVar,
- IncludeTopics: includeTopicsVar,
- KafkaCustomMetrics: kafkaCustomMetricsVar,
- MaxJmxMetrics: schemautil.ValueInt64Pointer(o.MaxJmxMetrics),
- Opensearch: opensearchVar,
- Redis: redisVar,
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- datadogTagsVar := schemautil.FlattenSetNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags)
- if diags.HasError() {
- return nil
- }
- excludeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- excludeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.ExcludeTopics)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- includeConsumerGroupsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeConsumerGroups)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- includeTopicsVar, d := types.SetValueFrom(ctx, types.StringType, o.IncludeTopics)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- kafkaCustomMetricsVar, d := types.SetValueFrom(ctx, types.StringType, o.KafkaCustomMetrics)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- opensearchVar := schemautil.FlattenSetBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch)
- if diags.HasError() {
- return nil
- }
- redisVar := schemautil.FlattenSetBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- DatadogDbmEnabled: types.BoolPointerValue(o.DatadogDbmEnabled),
- DatadogTags: datadogTagsVar,
- ExcludeConsumerGroups: excludeConsumerGroupsVar,
- ExcludeTopics: excludeTopicsVar,
- IncludeConsumerGroups: includeConsumerGroupsVar,
- IncludeTopics: includeTopicsVar,
- KafkaCustomMetrics: kafkaCustomMetricsVar,
- MaxJmxMetrics: types.Int64PointerValue(o.MaxJmxMetrics),
- Opensearch: opensearchVar,
- Redis: redisVar,
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "datadog_dbm_enabled": types.BoolType,
- "datadog_tags": types.SetType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}},
- "exclude_consumer_groups": types.SetType{ElemType: types.StringType},
- "exclude_topics": types.SetType{ElemType: types.StringType},
- "include_consumer_groups": types.SetType{ElemType: types.StringType},
- "include_topics": types.SetType{ElemType: types.StringType},
- "kafka_custom_metrics": types.SetType{ElemType: types.StringType},
- "max_jmx_metrics": types.Int64Type,
- "opensearch": types.SetType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}},
- "redis": types.SetType{ElemType: types.ObjectType{AttrTypes: redisAttrs}},
-}
-
-// tfoDatadogTags Datadog tag defined by user
-type tfoDatadogTags struct {
- Comment types.String `tfsdk:"comment"`
- Tag types.String `tfsdk:"tag"`
-}
-
-// dtoDatadogTags request/response object
-type dtoDatadogTags struct {
- Comment *string `groups:"create,update" json:"comment,omitempty"`
- Tag string `groups:"create,update" json:"tag"`
-}
-
-// expandDatadogTags expands tf object into dto object
-func expandDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *tfoDatadogTags) *dtoDatadogTags {
- return &dtoDatadogTags{
- Comment: schemautil.ValueStringPointer(o.Comment),
- Tag: o.Tag.ValueString(),
- }
-}
-
-// flattenDatadogTags flattens dto object into tf object
-func flattenDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *dtoDatadogTags) *tfoDatadogTags {
- return &tfoDatadogTags{
- Comment: types.StringPointerValue(o.Comment),
- Tag: types.StringValue(o.Tag),
- }
-}
-
-var datadogTagsAttrs = map[string]attr.Type{
- "comment": types.StringType,
- "tag": types.StringType,
-}
-
-// tfoOpensearch Datadog Opensearch Options
-type tfoOpensearch struct {
- IndexStatsEnabled types.Bool `tfsdk:"index_stats_enabled"`
- PendingTaskStatsEnabled types.Bool `tfsdk:"pending_task_stats_enabled"`
- PshardStatsEnabled types.Bool `tfsdk:"pshard_stats_enabled"`
-}
-
-// dtoOpensearch request/response object
-type dtoOpensearch struct {
- IndexStatsEnabled *bool `groups:"create,update" json:"index_stats_enabled,omitempty"`
- PendingTaskStatsEnabled *bool `groups:"create,update" json:"pending_task_stats_enabled,omitempty"`
- PshardStatsEnabled *bool `groups:"create,update" json:"pshard_stats_enabled,omitempty"`
-}
-
-// expandOpensearch expands tf object into dto object
-func expandOpensearch(ctx context.Context, diags *diag.Diagnostics, o *tfoOpensearch) *dtoOpensearch {
- return &dtoOpensearch{
- IndexStatsEnabled: schemautil.ValueBoolPointer(o.IndexStatsEnabled),
- PendingTaskStatsEnabled: schemautil.ValueBoolPointer(o.PendingTaskStatsEnabled),
- PshardStatsEnabled: schemautil.ValueBoolPointer(o.PshardStatsEnabled),
- }
-}
-
-// flattenOpensearch flattens dto object into tf object
-func flattenOpensearch(ctx context.Context, diags *diag.Diagnostics, o *dtoOpensearch) *tfoOpensearch {
- return &tfoOpensearch{
- IndexStatsEnabled: types.BoolPointerValue(o.IndexStatsEnabled),
- PendingTaskStatsEnabled: types.BoolPointerValue(o.PendingTaskStatsEnabled),
- PshardStatsEnabled: types.BoolPointerValue(o.PshardStatsEnabled),
- }
-}
-
-var opensearchAttrs = map[string]attr.Type{
- "index_stats_enabled": types.BoolType,
- "pending_task_stats_enabled": types.BoolType,
- "pshard_stats_enabled": types.BoolType,
-}
-
-// tfoRedis Datadog Redis Options
-type tfoRedis struct {
- CommandStatsEnabled types.Bool `tfsdk:"command_stats_enabled"`
-}
-
-// dtoRedis request/response object
-type dtoRedis struct {
- CommandStatsEnabled *bool `groups:"create,update" json:"command_stats_enabled,omitempty"`
-}
-
-// expandRedis expands tf object into dto object
-func expandRedis(ctx context.Context, diags *diag.Diagnostics, o *tfoRedis) *dtoRedis {
- return &dtoRedis{CommandStatsEnabled: schemautil.ValueBoolPointer(o.CommandStatsEnabled)}
-}
-
-// flattenRedis flattens dto object into tf object
-func flattenRedis(ctx context.Context, diags *diag.Diagnostics, o *dtoRedis) *tfoRedis {
- return &tfoRedis{CommandStatsEnabled: types.BoolPointerValue(o.CommandStatsEnabled)}
-}
-
-var redisAttrs = map[string]attr.Type{"command_stats_enabled": types.BoolType}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
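Besides nested blocks, the Datadog config also carries plain string sets (`exclude_topics`, `kafka_custom_metrics`, and so on), which the generated code converts with `types.SetValueFrom` when flattening and `schemautil.ExpandSet` when expanding. The standalone example below does the same round trip directly with the framework's `SetValueFrom`/`ElementsAs`; it assumes the terraform-plugin-framework module is available and trims error handling down to a print.

```go
// Round trip between a Go string slice and a framework types.Set, as used for
// the plain string sets in the Datadog user config.
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	ctx := context.Background()

	// Slice coming from the API response (the dto side).
	topics := []string{"orders", "payments"}

	// Flatten: wrap the slice into a framework Set value.
	set, diags := types.SetValueFrom(ctx, types.StringType, topics)
	if diags.HasError() {
		fmt.Println(diags)
		return
	}

	// Expand: unpack the Set back into a Go slice (the request side).
	var out []string
	diags = set.ElementsAs(ctx, &out, false)
	if diags.HasError() {
		fmt.Println(diags)
		return
	}
	fmt.Println(out) // [orders payments]
}
```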
diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go
deleted file mode 100644
index 04190b0f4..000000000
--- a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package datadog
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "datadog_dbm_enabled": true,
- "datadog_tags": [
- {
- "comment": "foo",
- "tag": "foo"
- }
- ],
- "exclude_consumer_groups": [
- "foo"
- ],
- "exclude_topics": [
- "foo"
- ],
- "include_consumer_groups": [
- "foo"
- ],
- "include_topics": [
- "foo"
- ],
- "kafka_custom_metrics": [
- "foo"
- ],
- "max_jmx_metrics": 1,
- "opensearch": {
- "index_stats_enabled": true,
- "pending_task_stats_enabled": true,
- "pshard_stats_enabled": true
- },
- "redis": {
- "command_stats_enabled": true
- }
-}`
-const updateOnlyFields = `{
- "datadog_dbm_enabled": true,
- "datadog_tags": [
- {
- "comment": "foo",
- "tag": "foo"
- }
- ],
- "exclude_consumer_groups": [
- "foo"
- ],
- "exclude_topics": [
- "foo"
- ],
- "include_consumer_groups": [
- "foo"
- ],
- "include_topics": [
- "foo"
- ],
- "kafka_custom_metrics": [
- "foo"
- ],
- "max_jmx_metrics": 1,
- "opensearch": {
- "index_stats_enabled": true,
- "pending_task_stats_enabled": true,
- "pshard_stats_enabled": true
- },
- "redis": {
- "command_stats_enabled": true
- }
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
- // Compares that output is strictly equal to the input
- // If so, the flow is valid
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go
deleted file mode 100644
index c49842976..000000000
--- a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package externalawscloudwatchmetrics
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "External AWS CloudWatch Metrics integration user config",
- NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{
- "dropped_metrics": resource.SetNestedBlock{
- Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "field": resource.StringAttribute{
- Description: "Identifier of a value in the metric.",
- Required: true,
- },
- "metric": resource.StringAttribute{
- Description: "Identifier of the metric.",
- Required: true,
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "extra_metrics": resource.SetNestedBlock{
- Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "field": resource.StringAttribute{
- Description: "Identifier of a value in the metric.",
- Required: true,
- },
- "metric": resource.StringAttribute{
- Description: "Identifier of the metric.",
- Required: true,
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "External AWS CloudWatch Metrics integration user config",
- NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{
- "dropped_metrics": datasource.SetNestedBlock{
- Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "field": datasource.StringAttribute{
- Computed: true,
- Description: "Identifier of a value in the metric.",
- },
- "metric": datasource.StringAttribute{
- Computed: true,
- Description: "Identifier of the metric.",
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- "extra_metrics": datasource.SetNestedBlock{
- Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "field": datasource.StringAttribute{
- Computed: true,
- Description: "Identifier of a value in the metric.",
- },
- "metric": datasource.StringAttribute{
- Computed: true,
- Description: "Identifier of the metric.",
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1024)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig External AWS CloudWatch Metrics integration user config
-type tfoUserConfig struct {
- DroppedMetrics types.Set `tfsdk:"dropped_metrics"`
- ExtraMetrics types.Set `tfsdk:"extra_metrics"`
-}
-
-// dtoUserConfig request/response object
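-// (The groups:"create,update" tags below presumably mark which operations each field may be
-// serialized for when schemautil.MarshalUserConfig builds create or update requests.)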
-type dtoUserConfig struct {
- DroppedMetrics []*dtoDroppedMetrics `groups:"create,update" json:"dropped_metrics,omitempty"`
- ExtraMetrics []*dtoExtraMetrics `groups:"create,update" json:"extra_metrics,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- droppedMetricsVar := schemautil.ExpandSetNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics)
- if diags.HasError() {
- return nil
- }
- extraMetricsVar := schemautil.ExpandSetNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- DroppedMetrics: droppedMetricsVar,
- ExtraMetrics: extraMetricsVar,
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- droppedMetricsVar := schemautil.FlattenSetNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics)
- if diags.HasError() {
- return nil
- }
- extraMetricsVar := schemautil.FlattenSetNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- DroppedMetrics: droppedMetricsVar,
- ExtraMetrics: extraMetricsVar,
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "dropped_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}},
- "extra_metrics": types.SetType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}},
-}
-
-// tfoDroppedMetrics Metric name and subfield
-type tfoDroppedMetrics struct {
- Field types.String `tfsdk:"field"`
- Metric types.String `tfsdk:"metric"`
-}
-
-// dtoDroppedMetrics request/response object
-type dtoDroppedMetrics struct {
- Field string `groups:"create,update" json:"field"`
- Metric string `groups:"create,update" json:"metric"`
-}
-
-// expandDroppedMetrics expands tf object into dto object
-func expandDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoDroppedMetrics) *dtoDroppedMetrics {
- return &dtoDroppedMetrics{
- Field: o.Field.ValueString(),
- Metric: o.Metric.ValueString(),
- }
-}
-
-// flattenDroppedMetrics flattens dto object into tf object
-func flattenDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoDroppedMetrics) *tfoDroppedMetrics {
- return &tfoDroppedMetrics{
- Field: types.StringValue(o.Field),
- Metric: types.StringValue(o.Metric),
- }
-}
-
-var droppedMetricsAttrs = map[string]attr.Type{
- "field": types.StringType,
- "metric": types.StringType,
-}
-
-// tfoExtraMetrics Metric name and subfield
-type tfoExtraMetrics struct {
- Field types.String `tfsdk:"field"`
- Metric types.String `tfsdk:"metric"`
-}
-
-// dtoExtraMetrics request/response object
-type dtoExtraMetrics struct {
- Field string `groups:"create,update" json:"field"`
- Metric string `groups:"create,update" json:"metric"`
-}
-
-// expandExtraMetrics expands tf object into dto object
-func expandExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoExtraMetrics) *dtoExtraMetrics {
- return &dtoExtraMetrics{
- Field: o.Field.ValueString(),
- Metric: o.Metric.ValueString(),
- }
-}
-
-// flattenExtraMetrics flattens dto object into tf object
-func flattenExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoExtraMetrics) *tfoExtraMetrics {
- return &tfoExtraMetrics{
- Field: types.StringValue(o.Field),
- Metric: types.StringValue(o.Metric),
- }
-}
-
-var extraMetricsAttrs = map[string]attr.Type{
- "field": types.StringType,
- "metric": types.StringType,
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go
deleted file mode 100644
index 9795bc385..000000000
--- a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package externalawscloudwatchmetrics
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "dropped_metrics": [
- {
- "field": "foo",
- "metric": "foo"
- }
- ],
- "extra_metrics": [
- {
- "field": "foo",
- "metric": "foo"
- }
- ]
-}`
-const updateOnlyFields = `{
- "dropped_metrics": [
- {
- "field": "foo",
- "metric": "foo"
- }
- ],
- "extra_metrics": [
- {
- "field": "foo",
- "metric": "foo"
- }
- ]
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
-			// Check that the output is strictly equal to the expected input.
-			// If it is, the whole flow is valid.
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go
deleted file mode 100644
index 9da78cb31..000000000
--- a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkaconnect
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.SetNestedBlock{
- Description: "Kafka Connect service configuration values",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "config_storage_topic": resource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id.",
- Optional: true,
- },
- "group_id": resource.StringAttribute{
- Computed: true,
- Description: "A unique string that identifies the Connect cluster group this worker belongs to.",
- Optional: true,
- },
- "offset_storage_topic": resource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id.",
- Optional: true,
- },
- "status_storage_topic": resource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id.",
- Optional: true,
- },
- }},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.SetNestedBlock{
- Description: "Kafka Connect service configuration values",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "config_storage_topic": datasource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration data are stored. This must be the same for all workers with the same group_id.",
- },
- "group_id": datasource.StringAttribute{
- Computed: true,
- Description: "A unique string that identifies the Connect cluster group this worker belongs to.",
- },
- "offset_storage_topic": datasource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration offsets are stored. This must be the same for all workers with the same group_id.",
- },
- "status_storage_topic": datasource.StringAttribute{
- Computed: true,
-					Description: "The name of the topic where connector and task configuration status updates are stored. This must be the same for all workers with the same group_id.",
- },
- }},
- }}},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig Integration user config
-type tfoUserConfig struct {
- KafkaConnect types.Set `tfsdk:"kafka_connect"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- KafkaConnect *dtoKafkaConnect `groups:"create,update" json:"kafka_connect,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- kafkaConnectVar := schemautil.ExpandSetBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{KafkaConnect: kafkaConnectVar}
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- kafkaConnectVar := schemautil.FlattenSetBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{KafkaConnect: kafkaConnectVar}
-}
-
-var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}}
-
-// tfoKafkaConnect Kafka Connect service configuration values
-type tfoKafkaConnect struct {
- ConfigStorageTopic types.String `tfsdk:"config_storage_topic"`
- GroupId types.String `tfsdk:"group_id"`
- OffsetStorageTopic types.String `tfsdk:"offset_storage_topic"`
- StatusStorageTopic types.String `tfsdk:"status_storage_topic"`
-}
-
-// dtoKafkaConnect request/response object
-type dtoKafkaConnect struct {
- ConfigStorageTopic *string `groups:"create,update" json:"config_storage_topic,omitempty"`
- GroupId *string `groups:"create,update" json:"group_id,omitempty"`
- OffsetStorageTopic *string `groups:"create,update" json:"offset_storage_topic,omitempty"`
- StatusStorageTopic *string `groups:"create,update" json:"status_storage_topic,omitempty"`
-}
-
-// expandKafkaConnect expands tf object into dto object
-func expandKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaConnect) *dtoKafkaConnect {
- return &dtoKafkaConnect{
- ConfigStorageTopic: schemautil.ValueStringPointer(o.ConfigStorageTopic),
- GroupId: schemautil.ValueStringPointer(o.GroupId),
- OffsetStorageTopic: schemautil.ValueStringPointer(o.OffsetStorageTopic),
- StatusStorageTopic: schemautil.ValueStringPointer(o.StatusStorageTopic),
- }
-}
-
-// flattenKafkaConnect flattens dto object into tf object
-func flattenKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaConnect) *tfoKafkaConnect {
- return &tfoKafkaConnect{
- ConfigStorageTopic: types.StringPointerValue(o.ConfigStorageTopic),
- GroupId: types.StringPointerValue(o.GroupId),
- OffsetStorageTopic: types.StringPointerValue(o.OffsetStorageTopic),
- StatusStorageTopic: types.StringPointerValue(o.StatusStorageTopic),
- }
-}
-
-var kafkaConnectAttrs = map[string]attr.Type{
- "config_storage_topic": types.StringType,
- "group_id": types.StringType,
- "offset_storage_topic": types.StringType,
- "status_storage_topic": types.StringType,
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go
deleted file mode 100644
index 69d9ae0cf..000000000
--- a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkaconnect
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "kafka_connect": {
- "config_storage_topic": "foo",
- "group_id": "foo",
- "offset_storage_topic": "foo",
- "status_storage_topic": "foo"
- }
-}`
-const updateOnlyFields = `{
- "kafka_connect": {
- "config_storage_topic": "foo",
- "group_id": "foo",
- "offset_storage_topic": "foo",
- "status_storage_topic": "foo"
- }
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
-			// Check that the output is strictly equal to the expected input.
-			// If it is, the whole flow is valid.
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go
deleted file mode 100644
index 9b42c841b..000000000
--- a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkalogs
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "kafka_topic": resource.StringAttribute{
- Description: "Topic name.",
- Required: true,
- },
- "selected_log_fields": resource.SetAttribute{
- Computed: true,
- Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(5)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "kafka_topic": datasource.StringAttribute{
- Computed: true,
- Description: "Topic name.",
- },
- "selected_log_fields": datasource.SetAttribute{
- Computed: true,
- Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(5)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig
-type tfoUserConfig struct {
- KafkaTopic types.String `tfsdk:"kafka_topic"`
- SelectedLogFields types.Set `tfsdk:"selected_log_fields"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- KafkaTopic string `groups:"create,update" json:"kafka_topic"`
- SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- KafkaTopic: o.KafkaTopic.ValueString(),
- SelectedLogFields: selectedLogFieldsVar,
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- KafkaTopic: types.StringValue(o.KafkaTopic),
- SelectedLogFields: selectedLogFieldsVar,
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "kafka_topic": types.StringType,
- "selected_log_fields": types.SetType{ElemType: types.StringType},
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go
deleted file mode 100644
index 166dd35a6..000000000
--- a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkalogs
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "kafka_topic": "foo",
- "selected_log_fields": [
- "foo"
- ]
-}`
-const updateOnlyFields = `{
- "kafka_topic": "foo",
- "selected_log_fields": [
- "foo"
- ]
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
-			// Check that the output is strictly equal to the expected input.
-			// If it is, the whole flow is valid.
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go
deleted file mode 100644
index de8db7181..000000000
--- a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkamirrormaker
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: resource.NestedBlockObject{
- Attributes: map[string]resource.Attribute{"cluster_alias": resource.StringAttribute{
- Computed: true,
- Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.",
- Optional: true,
- }},
- Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.SetNestedBlock{
- Description: "Kafka MirrorMaker configuration values",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "consumer_fetch_min_bytes": resource.Int64Attribute{
- Computed: true,
- Description: "The minimum amount of data the server should return for a fetch request.",
- Optional: true,
- },
- "producer_batch_size": resource.Int64Attribute{
- Computed: true,
- Description: "The batch size in bytes producer will attempt to collect before publishing to broker.",
- Optional: true,
- },
- "producer_buffer_memory": resource.Int64Attribute{
- Computed: true,
- Description: "The amount of bytes producer can use for buffering data before publishing to broker.",
- Optional: true,
- },
- "producer_compression_type": resource.StringAttribute{
- Computed: true,
- Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.",
- Optional: true,
- },
- "producer_linger_ms": resource.Int64Attribute{
- Computed: true,
- Description: "The linger time (ms) for waiting new data to arrive for publishing.",
- Optional: true,
- },
- "producer_max_request_size": resource.Int64Attribute{
- Computed: true,
- Description: "The maximum request size in bytes.",
- Optional: true,
- },
- }},
- }},
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: datasource.NestedBlockObject{
- Attributes: map[string]datasource.Attribute{"cluster_alias": datasource.StringAttribute{
- Computed: true,
- Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.",
- }},
- Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.SetNestedBlock{
- Description: "Kafka MirrorMaker configuration values",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "consumer_fetch_min_bytes": datasource.Int64Attribute{
- Computed: true,
- Description: "The minimum amount of data the server should return for a fetch request.",
- },
- "producer_batch_size": datasource.Int64Attribute{
- Computed: true,
- Description: "The batch size in bytes producer will attempt to collect before publishing to broker.",
- },
- "producer_buffer_memory": datasource.Int64Attribute{
- Computed: true,
- Description: "The amount of bytes producer can use for buffering data before publishing to broker.",
- },
- "producer_compression_type": datasource.StringAttribute{
- Computed: true,
- Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.",
- },
- "producer_linger_ms": datasource.Int64Attribute{
- Computed: true,
- Description: "The linger time (ms) for waiting new data to arrive for publishing.",
- },
- "producer_max_request_size": datasource.Int64Attribute{
- Computed: true,
- Description: "The maximum request size in bytes.",
- },
- }},
- }},
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig Integration user config
-type tfoUserConfig struct {
- ClusterAlias types.String `tfsdk:"cluster_alias"`
- KafkaMirrormaker types.Set `tfsdk:"kafka_mirrormaker"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- ClusterAlias *string `groups:"create,update" json:"cluster_alias,omitempty"`
- KafkaMirrormaker *dtoKafkaMirrormaker `groups:"create,update" json:"kafka_mirrormaker,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- kafkaMirrormakerVar := schemautil.ExpandSetBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- ClusterAlias: schemautil.ValueStringPointer(o.ClusterAlias),
- KafkaMirrormaker: kafkaMirrormakerVar,
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- kafkaMirrormakerVar := schemautil.FlattenSetBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- ClusterAlias: types.StringPointerValue(o.ClusterAlias),
- KafkaMirrormaker: kafkaMirrormakerVar,
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "cluster_alias": types.StringType,
- "kafka_mirrormaker": types.SetType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}},
-}
-
-// tfoKafkaMirrormaker Kafka MirrorMaker configuration values
-type tfoKafkaMirrormaker struct {
- ConsumerFetchMinBytes types.Int64 `tfsdk:"consumer_fetch_min_bytes"`
- ProducerBatchSize types.Int64 `tfsdk:"producer_batch_size"`
- ProducerBufferMemory types.Int64 `tfsdk:"producer_buffer_memory"`
- ProducerCompressionType types.String `tfsdk:"producer_compression_type"`
- ProducerLingerMs types.Int64 `tfsdk:"producer_linger_ms"`
- ProducerMaxRequestSize types.Int64 `tfsdk:"producer_max_request_size"`
-}
-
-// dtoKafkaMirrormaker request/response object
-type dtoKafkaMirrormaker struct {
- ConsumerFetchMinBytes *int64 `groups:"create,update" json:"consumer_fetch_min_bytes,omitempty"`
- ProducerBatchSize *int64 `groups:"create,update" json:"producer_batch_size,omitempty"`
- ProducerBufferMemory *int64 `groups:"create,update" json:"producer_buffer_memory,omitempty"`
- ProducerCompressionType *string `groups:"create,update" json:"producer_compression_type,omitempty"`
- ProducerLingerMs *int64 `groups:"create,update" json:"producer_linger_ms,omitempty"`
- ProducerMaxRequestSize *int64 `groups:"create,update" json:"producer_max_request_size,omitempty"`
-}
-
-// expandKafkaMirrormaker expands tf object into dto object
-func expandKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaMirrormaker) *dtoKafkaMirrormaker {
- return &dtoKafkaMirrormaker{
- ConsumerFetchMinBytes: schemautil.ValueInt64Pointer(o.ConsumerFetchMinBytes),
- ProducerBatchSize: schemautil.ValueInt64Pointer(o.ProducerBatchSize),
- ProducerBufferMemory: schemautil.ValueInt64Pointer(o.ProducerBufferMemory),
- ProducerCompressionType: schemautil.ValueStringPointer(o.ProducerCompressionType),
- ProducerLingerMs: schemautil.ValueInt64Pointer(o.ProducerLingerMs),
- ProducerMaxRequestSize: schemautil.ValueInt64Pointer(o.ProducerMaxRequestSize),
- }
-}
-
-// flattenKafkaMirrormaker flattens dto object into tf object
-func flattenKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaMirrormaker) *tfoKafkaMirrormaker {
- return &tfoKafkaMirrormaker{
- ConsumerFetchMinBytes: types.Int64PointerValue(o.ConsumerFetchMinBytes),
- ProducerBatchSize: types.Int64PointerValue(o.ProducerBatchSize),
- ProducerBufferMemory: types.Int64PointerValue(o.ProducerBufferMemory),
- ProducerCompressionType: types.StringPointerValue(o.ProducerCompressionType),
- ProducerLingerMs: types.Int64PointerValue(o.ProducerLingerMs),
- ProducerMaxRequestSize: types.Int64PointerValue(o.ProducerMaxRequestSize),
- }
-}
-
-var kafkaMirrormakerAttrs = map[string]attr.Type{
- "consumer_fetch_min_bytes": types.Int64Type,
- "producer_batch_size": types.Int64Type,
- "producer_buffer_memory": types.Int64Type,
- "producer_compression_type": types.StringType,
- "producer_linger_ms": types.Int64Type,
- "producer_max_request_size": types.Int64Type,
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go
deleted file mode 100644
index 611545286..000000000
--- a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package kafkamirrormaker
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "cluster_alias": "foo",
- "kafka_mirrormaker": {
- "consumer_fetch_min_bytes": 1,
- "producer_batch_size": 1,
- "producer_buffer_memory": 1,
- "producer_compression_type": "foo",
- "producer_linger_ms": 1,
- "producer_max_request_size": 1
- }
-}`
-const updateOnlyFields = `{
- "cluster_alias": "foo",
- "kafka_mirrormaker": {
- "consumer_fetch_min_bytes": 1,
- "producer_batch_size": 1,
- "producer_buffer_memory": 1,
- "producer_compression_type": "foo",
- "producer_linger_ms": 1,
- "producer_max_request_size": 1
- }
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
-			// Check that the output is strictly equal to the expected input.
-			// If it is, the whole flow is valid.
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/logs/logs.go b/internal/plugin/service/userconfig/integration/logs/logs.go
deleted file mode 100644
index 1916dcdcc..000000000
--- a/internal/plugin/service/userconfig/integration/logs/logs.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package logs
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
- stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "elasticsearch_index_days_max": resource.Int64Attribute{
- Computed: true,
- Default: int64default.StaticInt64(3),
- Description: "Elasticsearch index retention limit. The default value is `3`.",
- Optional: true,
- },
- "elasticsearch_index_prefix": resource.StringAttribute{
- Computed: true,
- Default: stringdefault.StaticString("logs"),
- Description: "Elasticsearch index prefix. The default value is `logs`.",
- Optional: true,
- },
- "selected_log_fields": resource.SetAttribute{
- Computed: true,
- Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
- ElementType: types.StringType,
- Optional: true,
- Validators: []validator.Set{setvalidator.SizeAtMost(5)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "elasticsearch_index_days_max": datasource.Int64Attribute{
- Computed: true,
- Description: "Elasticsearch index retention limit. The default value is `3`.",
- },
- "elasticsearch_index_prefix": datasource.StringAttribute{
- Computed: true,
- Description: "Elasticsearch index prefix. The default value is `logs`.",
- },
- "selected_log_fields": datasource.SetAttribute{
- Computed: true,
- Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
- ElementType: types.StringType,
- Validators: []validator.Set{setvalidator.SizeAtMost(5)},
- },
- }},
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig
-type tfoUserConfig struct {
- ElasticsearchIndexDaysMax types.Int64 `tfsdk:"elasticsearch_index_days_max"`
- ElasticsearchIndexPrefix types.String `tfsdk:"elasticsearch_index_prefix"`
- SelectedLogFields types.Set `tfsdk:"selected_log_fields"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- ElasticsearchIndexDaysMax *int64 `groups:"create,update" json:"elasticsearch_index_days_max,omitempty"`
- ElasticsearchIndexPrefix *string `groups:"create,update" json:"elasticsearch_index_prefix,omitempty"`
- SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- selectedLogFieldsVar := schemautil.ExpandSet[string](ctx, diags, o.SelectedLogFields)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- ElasticsearchIndexDaysMax: schemautil.ValueInt64Pointer(o.ElasticsearchIndexDaysMax),
- ElasticsearchIndexPrefix: schemautil.ValueStringPointer(o.ElasticsearchIndexPrefix),
- SelectedLogFields: selectedLogFieldsVar,
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- selectedLogFieldsVar, d := types.SetValueFrom(ctx, types.StringType, o.SelectedLogFields)
- diags.Append(d...)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- ElasticsearchIndexDaysMax: types.Int64PointerValue(o.ElasticsearchIndexDaysMax),
- ElasticsearchIndexPrefix: types.StringPointerValue(o.ElasticsearchIndexPrefix),
- SelectedLogFields: selectedLogFieldsVar,
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "elasticsearch_index_days_max": types.Int64Type,
- "elasticsearch_index_prefix": types.StringType,
- "selected_log_fields": types.SetType{ElemType: types.StringType},
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/logs/logs_test.go b/internal/plugin/service/userconfig/integration/logs/logs_test.go
deleted file mode 100644
index bab47414f..000000000
--- a/internal/plugin/service/userconfig/integration/logs/logs_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package logs
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "elasticsearch_index_days_max": 1,
- "elasticsearch_index_prefix": "foo",
- "selected_log_fields": [
- "foo"
- ]
-}`
-const updateOnlyFields = `{
- "elasticsearch_index_days_max": 1,
- "elasticsearch_index_prefix": "foo",
- "selected_log_fields": [
- "foo"
- ]
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
-			// Check that the output is strictly equal to the expected input.
-			// If it is, the whole flow is valid.
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics.go b/internal/plugin/service/userconfig/integration/metrics/metrics.go
deleted file mode 100644
index 3fb712119..000000000
--- a/internal/plugin/service/userconfig/integration/metrics/metrics.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package metrics
-
-import (
- "context"
-
- setvalidator "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- attr "github.com/hashicorp/terraform-plugin-framework/attr"
- datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- diag "github.com/hashicorp/terraform-plugin-framework/diag"
- resource "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- validator "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- types "github.com/hashicorp/terraform-plugin-framework/types"
-
- schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-// NewResourceSchema returns resource schema
-func NewResourceSchema() resource.SetNestedBlock {
- return resource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: resource.NestedBlockObject{
- Attributes: map[string]resource.Attribute{
- "database": resource.StringAttribute{
- Computed: true,
- Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- Optional: true,
- },
- "retention_days": resource.Int64Attribute{
- Computed: true,
- Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.",
- Optional: true,
- },
- "ro_username": resource.StringAttribute{
- Computed: true,
- Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- Optional: true,
- },
- "username": resource.StringAttribute{
- Computed: true,
- Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- Optional: true,
- },
- },
- Blocks: map[string]resource.Block{"source_mysql": resource.SetNestedBlock{
- Description: "Configuration options for metrics where source service is MySQL",
- NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.SetNestedBlock{
- Description: "Configuration options for Telegraf MySQL input plugin",
- NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{
- "gather_event_waits": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.",
- Optional: true,
- },
- "gather_file_events_stats": resource.BoolAttribute{
- Computed: true,
-							Description: "Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.",
- Optional: true,
- },
- "gather_index_io_waits": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.",
- Optional: true,
- },
- "gather_info_schema_auto_inc": resource.BoolAttribute{
- Computed: true,
- Description: "Gather auto_increment columns and max values from information schema.",
- Optional: true,
- },
- "gather_innodb_metrics": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.",
- Optional: true,
- },
- "gather_perf_events_statements": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.",
- Optional: true,
- },
- "gather_process_list": resource.BoolAttribute{
- Computed: true,
- Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.",
- Optional: true,
- },
- "gather_slave_status": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from SHOW SLAVE STATUS command output.",
- Optional: true,
- },
- "gather_table_io_waits": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.",
- Optional: true,
- },
- "gather_table_lock_waits": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.",
- Optional: true,
- },
- "gather_table_schema": resource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.",
- Optional: true,
- },
- "perf_events_statements_digest_text_limit": resource.Int64Attribute{
- Computed: true,
- Description: "Truncates digest text from perf_events_statements into this many characters.",
- Optional: true,
- },
- "perf_events_statements_limit": resource.Int64Attribute{
- Computed: true,
- Description: "Limits metrics from perf_events_statements.",
- Optional: true,
- },
- "perf_events_statements_time_limit": resource.Int64Attribute{
- Computed: true,
- Description: "Only include perf_events_statements whose last seen is less than this many seconds.",
- Optional: true,
- },
- }},
- }}},
- }},
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// NewDataSourceSchema returns datasource schema
-func NewDataSourceSchema() datasource.SetNestedBlock {
- return datasource.SetNestedBlock{
- Description: "Integration user config",
- NestedObject: datasource.NestedBlockObject{
- Attributes: map[string]datasource.Attribute{
- "database": datasource.StringAttribute{
- Computed: true,
- Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- },
- "retention_days": datasource.Int64Attribute{
- Computed: true,
- Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.",
- },
- "ro_username": datasource.StringAttribute{
- Computed: true,
- Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- },
- "username": datasource.StringAttribute{
- Computed: true,
- Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
- },
- },
- Blocks: map[string]datasource.Block{"source_mysql": datasource.SetNestedBlock{
- Description: "Configuration options for metrics where source service is MySQL",
- NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.SetNestedBlock{
- Description: "Configuration options for Telegraf MySQL input plugin",
- NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{
- "gather_event_waits": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.",
- },
- "gather_file_events_stats": datasource.BoolAttribute{
- Computed: true,
-							Description: "Gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.",
- },
- "gather_index_io_waits": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.",
- },
- "gather_info_schema_auto_inc": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather auto_increment columns and max values from information schema.",
- },
- "gather_innodb_metrics": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.",
- },
- "gather_perf_events_statements": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.",
- },
- "gather_process_list": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.",
- },
- "gather_slave_status": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from SHOW SLAVE STATUS command output.",
- },
- "gather_table_io_waits": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.",
- },
- "gather_table_lock_waits": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.",
- },
- "gather_table_schema": datasource.BoolAttribute{
- Computed: true,
- Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.",
- },
- "perf_events_statements_digest_text_limit": datasource.Int64Attribute{
- Computed: true,
- Description: "Truncates digest text from perf_events_statements into this many characters.",
- },
- "perf_events_statements_limit": datasource.Int64Attribute{
- Computed: true,
- Description: "Limits metrics from perf_events_statements.",
- },
- "perf_events_statements_time_limit": datasource.Int64Attribute{
- Computed: true,
- Description: "Only include perf_events_statements whose last seen is less than this many seconds.",
- },
- }},
- }}},
- }},
- },
- Validators: []validator.Set{setvalidator.SizeAtMost(1)},
- }
-}
-
-// tfoUserConfig Integration user config
-type tfoUserConfig struct {
- Database types.String `tfsdk:"database"`
- RetentionDays types.Int64 `tfsdk:"retention_days"`
- RoUsername types.String `tfsdk:"ro_username"`
- SourceMysql types.Set `tfsdk:"source_mysql"`
- Username types.String `tfsdk:"username"`
-}
-
-// dtoUserConfig request/response object
-type dtoUserConfig struct {
- Database *string `groups:"create,update" json:"database,omitempty"`
- RetentionDays *int64 `groups:"create,update" json:"retention_days,omitempty"`
- RoUsername *string `groups:"create,update" json:"ro_username,omitempty"`
- SourceMysql *dtoSourceMysql `groups:"create,update" json:"source_mysql,omitempty"`
- Username *string `groups:"create,update" json:"username,omitempty"`
-}
-
-// expandUserConfig expands tf object into dto object
-func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig {
- sourceMysqlVar := schemautil.ExpandSetBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql)
- if diags.HasError() {
- return nil
- }
- return &dtoUserConfig{
- Database: schemautil.ValueStringPointer(o.Database),
- RetentionDays: schemautil.ValueInt64Pointer(o.RetentionDays),
- RoUsername: schemautil.ValueStringPointer(o.RoUsername),
- SourceMysql: sourceMysqlVar,
- Username: schemautil.ValueStringPointer(o.Username),
- }
-}
-
-// flattenUserConfig flattens dto object into tf object
-func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig {
- sourceMysqlVar := schemautil.FlattenSetBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql)
- if diags.HasError() {
- return nil
- }
- return &tfoUserConfig{
- Database: types.StringPointerValue(o.Database),
- RetentionDays: types.Int64PointerValue(o.RetentionDays),
- RoUsername: types.StringPointerValue(o.RoUsername),
- SourceMysql: sourceMysqlVar,
- Username: types.StringPointerValue(o.Username),
- }
-}
-
-var userConfigAttrs = map[string]attr.Type{
- "database": types.StringType,
- "retention_days": types.Int64Type,
- "ro_username": types.StringType,
- "source_mysql": types.SetType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}},
- "username": types.StringType,
-}
-
-// tfoSourceMysql Configuration options for metrics where source service is MySQL
-type tfoSourceMysql struct {
- Telegraf types.Set `tfsdk:"telegraf"`
-}
-
-// dtoSourceMysql request/response object
-type dtoSourceMysql struct {
- Telegraf *dtoTelegraf `groups:"create,update" json:"telegraf,omitempty"`
-}
-
-// expandSourceMysql expands tf object into dto object
-func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourceMysql) *dtoSourceMysql {
- telegrafVar := schemautil.ExpandSetBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf)
- if diags.HasError() {
- return nil
- }
- return &dtoSourceMysql{Telegraf: telegrafVar}
-}
-
-// flattenSourceMysql flattens dto object into tf object
-func flattenSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *dtoSourceMysql) *tfoSourceMysql {
- telegrafVar := schemautil.FlattenSetBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf)
- if diags.HasError() {
- return nil
- }
- return &tfoSourceMysql{Telegraf: telegrafVar}
-}
-
-var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.SetType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}}
-
-// tfoTelegraf Configuration options for Telegraf MySQL input plugin
-type tfoTelegraf struct {
- GatherEventWaits types.Bool `tfsdk:"gather_event_waits"`
- GatherFileEventsStats types.Bool `tfsdk:"gather_file_events_stats"`
- GatherIndexIoWaits types.Bool `tfsdk:"gather_index_io_waits"`
- GatherInfoSchemaAutoInc types.Bool `tfsdk:"gather_info_schema_auto_inc"`
- GatherInnodbMetrics types.Bool `tfsdk:"gather_innodb_metrics"`
- GatherPerfEventsStatements types.Bool `tfsdk:"gather_perf_events_statements"`
- GatherProcessList types.Bool `tfsdk:"gather_process_list"`
- GatherSlaveStatus types.Bool `tfsdk:"gather_slave_status"`
- GatherTableIoWaits types.Bool `tfsdk:"gather_table_io_waits"`
- GatherTableLockWaits types.Bool `tfsdk:"gather_table_lock_waits"`
- GatherTableSchema types.Bool `tfsdk:"gather_table_schema"`
- PerfEventsStatementsDigestTextLimit types.Int64 `tfsdk:"perf_events_statements_digest_text_limit"`
- PerfEventsStatementsLimit types.Int64 `tfsdk:"perf_events_statements_limit"`
- PerfEventsStatementsTimeLimit types.Int64 `tfsdk:"perf_events_statements_time_limit"`
-}
-
-// dtoTelegraf request/response object
-type dtoTelegraf struct {
- GatherEventWaits *bool `groups:"create,update" json:"gather_event_waits,omitempty"`
- GatherFileEventsStats *bool `groups:"create,update" json:"gather_file_events_stats,omitempty"`
- GatherIndexIoWaits *bool `groups:"create,update" json:"gather_index_io_waits,omitempty"`
- GatherInfoSchemaAutoInc *bool `groups:"create,update" json:"gather_info_schema_auto_inc,omitempty"`
- GatherInnodbMetrics *bool `groups:"create,update" json:"gather_innodb_metrics,omitempty"`
- GatherPerfEventsStatements *bool `groups:"create,update" json:"gather_perf_events_statements,omitempty"`
- GatherProcessList *bool `groups:"create,update" json:"gather_process_list,omitempty"`
- GatherSlaveStatus *bool `groups:"create,update" json:"gather_slave_status,omitempty"`
- GatherTableIoWaits *bool `groups:"create,update" json:"gather_table_io_waits,omitempty"`
- GatherTableLockWaits *bool `groups:"create,update" json:"gather_table_lock_waits,omitempty"`
- GatherTableSchema *bool `groups:"create,update" json:"gather_table_schema,omitempty"`
- PerfEventsStatementsDigestTextLimit *int64 `groups:"create,update" json:"perf_events_statements_digest_text_limit,omitempty"`
- PerfEventsStatementsLimit *int64 `groups:"create,update" json:"perf_events_statements_limit,omitempty"`
- PerfEventsStatementsTimeLimit *int64 `groups:"create,update" json:"perf_events_statements_time_limit,omitempty"`
-}
-
-// expandTelegraf expands tf object into dto object
-func expandTelegraf(ctx context.Context, diags *diag.Diagnostics, o *tfoTelegraf) *dtoTelegraf {
- return &dtoTelegraf{
- GatherEventWaits: schemautil.ValueBoolPointer(o.GatherEventWaits),
- GatherFileEventsStats: schemautil.ValueBoolPointer(o.GatherFileEventsStats),
- GatherIndexIoWaits: schemautil.ValueBoolPointer(o.GatherIndexIoWaits),
- GatherInfoSchemaAutoInc: schemautil.ValueBoolPointer(o.GatherInfoSchemaAutoInc),
- GatherInnodbMetrics: schemautil.ValueBoolPointer(o.GatherInnodbMetrics),
- GatherPerfEventsStatements: schemautil.ValueBoolPointer(o.GatherPerfEventsStatements),
- GatherProcessList: schemautil.ValueBoolPointer(o.GatherProcessList),
- GatherSlaveStatus: schemautil.ValueBoolPointer(o.GatherSlaveStatus),
- GatherTableIoWaits: schemautil.ValueBoolPointer(o.GatherTableIoWaits),
- GatherTableLockWaits: schemautil.ValueBoolPointer(o.GatherTableLockWaits),
- GatherTableSchema: schemautil.ValueBoolPointer(o.GatherTableSchema),
- PerfEventsStatementsDigestTextLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsDigestTextLimit),
- PerfEventsStatementsLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsLimit),
- PerfEventsStatementsTimeLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsTimeLimit),
- }
-}
-
-// flattenTelegraf flattens dto object into tf object
-func flattenTelegraf(ctx context.Context, diags *diag.Diagnostics, o *dtoTelegraf) *tfoTelegraf {
- return &tfoTelegraf{
- GatherEventWaits: types.BoolPointerValue(o.GatherEventWaits),
- GatherFileEventsStats: types.BoolPointerValue(o.GatherFileEventsStats),
- GatherIndexIoWaits: types.BoolPointerValue(o.GatherIndexIoWaits),
- GatherInfoSchemaAutoInc: types.BoolPointerValue(o.GatherInfoSchemaAutoInc),
- GatherInnodbMetrics: types.BoolPointerValue(o.GatherInnodbMetrics),
- GatherPerfEventsStatements: types.BoolPointerValue(o.GatherPerfEventsStatements),
- GatherProcessList: types.BoolPointerValue(o.GatherProcessList),
- GatherSlaveStatus: types.BoolPointerValue(o.GatherSlaveStatus),
- GatherTableIoWaits: types.BoolPointerValue(o.GatherTableIoWaits),
- GatherTableLockWaits: types.BoolPointerValue(o.GatherTableLockWaits),
- GatherTableSchema: types.BoolPointerValue(o.GatherTableSchema),
- PerfEventsStatementsDigestTextLimit: types.Int64PointerValue(o.PerfEventsStatementsDigestTextLimit),
- PerfEventsStatementsLimit: types.Int64PointerValue(o.PerfEventsStatementsLimit),
- PerfEventsStatementsTimeLimit: types.Int64PointerValue(o.PerfEventsStatementsTimeLimit),
- }
-}
-
-var telegrafAttrs = map[string]attr.Type{
- "gather_event_waits": types.BoolType,
- "gather_file_events_stats": types.BoolType,
- "gather_index_io_waits": types.BoolType,
- "gather_info_schema_auto_inc": types.BoolType,
- "gather_innodb_metrics": types.BoolType,
- "gather_perf_events_statements": types.BoolType,
- "gather_process_list": types.BoolType,
- "gather_slave_status": types.BoolType,
- "gather_table_io_waits": types.BoolType,
- "gather_table_lock_waits": types.BoolType,
- "gather_table_schema": types.BoolType,
- "perf_events_statements_digest_text_limit": types.Int64Type,
- "perf_events_statements_limit": types.Int64Type,
- "perf_events_statements_time_limit": types.Int64Type,
-}
-
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go
deleted file mode 100644
index f20d69dd9..000000000
--- a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Code generated by user config generator. DO NOT EDIT.
-
-package metrics
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = `{
- "database": "foo",
- "retention_days": 1,
- "ro_username": "foo",
- "source_mysql": {
- "telegraf": {
- "gather_event_waits": true,
- "gather_file_events_stats": true,
- "gather_index_io_waits": true,
- "gather_info_schema_auto_inc": true,
- "gather_innodb_metrics": true,
- "gather_perf_events_statements": true,
- "gather_process_list": true,
- "gather_slave_status": true,
- "gather_table_io_waits": true,
- "gather_table_lock_waits": true,
- "gather_table_schema": true,
- "perf_events_statements_digest_text_limit": 1,
- "perf_events_statements_limit": 1,
- "perf_events_statements_time_limit": 1
- }
- },
- "username": "foo"
-}`
-const updateOnlyFields = `{
- "database": "foo",
- "retention_days": 1,
- "ro_username": "foo",
- "source_mysql": {
- "telegraf": {
- "gather_event_waits": true,
- "gather_file_events_stats": true,
- "gather_index_io_waits": true,
- "gather_info_schema_auto_inc": true,
- "gather_innodb_metrics": true,
- "gather_perf_events_statements": true,
- "gather_process_list": true,
- "gather_slave_status": true,
- "gather_table_io_waits": true,
- "gather_table_lock_waits": true,
- "gather_table_schema": true,
- "perf_events_statements_digest_text_limit": 1,
- "perf_events_statements_limit": 1,
- "perf_events_statements_time_limit": 1
- }
- },
- "username": "foo"
-}`
-
-func TestUserConfig(t *testing.T) {
- cases := []struct {
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dtoUserConfig)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flattenUserConfig(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expandUserConfig(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
- // Compares that output is strictly equal to the input
- // If so, the flow is valid
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
diff --git a/internal/sdkprovider/provider/provider.go b/internal/sdkprovider/provider/provider.go
index 1dd0006a4..1628932b3 100644
--- a/internal/sdkprovider/provider/provider.go
+++ b/internal/sdkprovider/provider/provider.go
@@ -104,6 +104,7 @@ func Provider(version string) *schema.Provider {
"aiven_transit_gateway_vpc_attachment": vpc.DatasourceTransitGatewayVPCAttachment(),
// service integrations
+ "aiven_service_integration": serviceintegration.DatasourceServiceIntegration(),
"aiven_service_integration_endpoint": serviceintegration.DatasourceServiceIntegrationEndpoint(),
// m3db
@@ -202,6 +203,7 @@ func Provider(version string) *schema.Provider {
"aiven_transit_gateway_vpc_attachment": vpc.ResourceTransitGatewayVPCAttachment(),
// service integrations
+ "aiven_service_integration": serviceintegration.ResourceServiceIntegration(),
"aiven_service_integration_endpoint": serviceintegration.ResourceServiceIntegrationEndpoint(),
// m3db
diff --git a/internal/sdkprovider/service/serviceintegration/service_integration.go b/internal/sdkprovider/service/serviceintegration/service_integration.go
new file mode 100644
index 000000000..72a4e7042
--- /dev/null
+++ b/internal/sdkprovider/service/serviceintegration/service_integration.go
@@ -0,0 +1,398 @@
+package serviceintegration
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "regexp"
+ "time"
+
+ "github.com/aiven/aiven-go-client/v2"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader"
+)
+
+const serviceIntegrationEndpointRegExp = "^[a-zA-Z0-9_-]*\\/{1}[a-zA-Z0-9_-]*$"
+
+var integrationTypes = []string{
+ "alertmanager",
+ "cassandra_cross_service_cluster",
+ "clickhouse_kafka",
+ "clickhouse_postgresql",
+ "dashboard",
+ "datadog",
+ "datasource",
+ "external_aws_cloudwatch_logs",
+ "external_aws_cloudwatch_metrics",
+ "external_elasticsearch_logs",
+ "external_google_cloud_logging",
+ "external_opensearch_logs",
+ "flink",
+ "internal_connectivity",
+ "jolokia",
+ "kafka_connect",
+ "kafka_logs",
+ "kafka_mirrormaker",
+ "logs",
+ "m3aggregator",
+ "m3coordinator",
+ "metrics",
+ "opensearch_cross_cluster_replication",
+ "opensearch_cross_cluster_search",
+ "prometheus",
+ "read_replica",
+ "rsyslog",
+ "schema_registry_proxy",
+}
+
+var aivenServiceIntegrationSchema = map[string]*schema.Schema{
+ "integration_id": {
+ Description: "Service Integration Id at aiven",
+ Computed: true,
+ Type: schema.TypeString,
+ },
+ "destination_endpoint_id": {
+ Description: "Destination endpoint for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
+ "endpoint id should have the following format: project_name/endpoint_id"),
+ },
+ "destination_service_name": {
+ Description: "Destination service for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "integration_type": {
+ Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"),
+ ForceNew: true,
+ Required: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice(integrationTypes, false),
+ },
+ "project": {
+ Description: "Project the integration belongs to",
+ ForceNew: true,
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "source_endpoint_id": {
+ Description: "Source endpoint for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
+ "endpoint id should have the following format: project_name/endpoint_id"),
+ },
+ "source_service_name": {
+ Description: "Source service for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "logs_user_config": dist.IntegrationTypeLogs(),
+ "kafka_mirrormaker_user_config": dist.IntegrationTypeKafkaMirrormaker(),
+ "kafka_connect_user_config": dist.IntegrationTypeKafkaConnect(),
+ "kafka_logs_user_config": dist.IntegrationTypeKafkaLogs(),
+ "metrics_user_config": dist.IntegrationTypeMetrics(),
+ "datadog_user_config": dist.IntegrationTypeDatadog(),
+ "clickhouse_kafka_user_config": dist.IntegrationTypeClickhouseKafka(),
+ "clickhouse_postgresql_user_config": dist.IntegrationTypeClickhousePostgresql(),
+ "external_aws_cloudwatch_metrics_user_config": dist.IntegrationTypeExternalAwsCloudwatchMetrics(),
+}
+
+func ResourceServiceIntegration() *schema.Resource {
+ return &schema.Resource{
+ Description: "The Service Integration resource allows the creation and management of Aiven Service Integrations.",
+ CreateContext: resourceServiceIntegrationCreate,
+ ReadContext: resourceServiceIntegrationRead,
+ UpdateContext: resourceServiceIntegrationUpdate,
+ DeleteContext: resourceServiceIntegrationDelete,
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ Timeouts: schemautil.DefaultResourceTimeouts(),
+
+ Schema: aivenServiceIntegrationSchema,
+ SchemaVersion: 1,
+ StateUpgraders: stateupgrader.ServiceIntegration(),
+ }
+}
+
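+// plainEndpointID strips the project prefix from a "project_name/endpoint_id" value and returns
+// only the endpoint ID expected by the API; it returns nil for nil or malformed input.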
+func plainEndpointID(fullEndpointID *string) *string {
+ if fullEndpointID == nil {
+ return nil
+ }
+ _, endpointID, err := schemautil.SplitResourceID2(*fullEndpointID)
+ if err != nil {
+ return nil
+ }
+ return &endpointID
+}
+
+func resourceServiceIntegrationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client := m.(*aiven.Client)
+
+ projectName := d.Get("project").(string)
+ integrationType := d.Get("integration_type").(string)
+
+	// read_replica integrations can only be created alongside the service. Also, the only way to promote
+	// the replica is to delete the service integration that was created, so we should make that as painless as possible.
+	// For now we seamlessly import preexisting 'read_replica' service integrations on resource create;
+	// all other integrations should be imported using `terraform import`.
+ if integrationType == "read_replica" {
+ if preexisting, err := resourceServiceIntegrationCheckForPreexistingResource(ctx, d, m); err != nil {
+ return diag.Errorf("unable to search for possible preexisting 'read_replica' service integration: %s", err)
+ } else if preexisting != nil {
+ d.SetId(schemautil.BuildResourceID(projectName, preexisting.ServiceIntegrationID))
+ return resourceServiceIntegrationRead(ctx, d, m)
+ }
+ }
+
+ uc, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ integration, err := client.ServiceIntegrations.Create(
+ ctx,
+ projectName,
+ aiven.CreateServiceIntegrationRequest{
+ DestinationEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "destination_endpoint_id")),
+ DestinationService: schemautil.OptionalStringPointer(d, "destination_service_name"),
+ IntegrationType: integrationType,
+ SourceEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "source_endpoint_id")),
+ SourceService: schemautil.OptionalStringPointer(d, "source_service_name"),
+ UserConfig: uc,
+ },
+ )
+ if err != nil {
+		return diag.Errorf("error creating service integration: %s", err)
+ }
+ d.SetId(schemautil.BuildResourceID(projectName, integration.ServiceIntegrationID))
+
+ if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil {
+ return diag.Errorf("unable to wait for service integration to become active: %s", err)
+ }
+ return resourceServiceIntegrationRead(ctx, d, m)
+}
+
+func resourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client := m.(*aiven.Client)
+
+ projectName, integrationID, err := schemautil.SplitResourceID2(d.Id())
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ integration, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID)
+ if err != nil {
+ err = schemautil.ResourceReadHandleNotFound(err, d)
+ if err != nil {
+ return diag.Errorf("cannot get service integration: %s; id: %s", err, integrationID)
+ }
+ return nil
+ }
+
+ if err = resourceServiceIntegrationCopyAPIResponseToTerraform(d, integration, projectName); err != nil {
+ return diag.Errorf("cannot copy api response into terraform schema: %s", err)
+ }
+
+ return nil
+}
+
+func resourceServiceIntegrationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client := m.(*aiven.Client)
+
+ projectName, integrationID, err := schemautil.SplitResourceID2(d.Id())
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ userConfig, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ if userConfig == nil {
+ // Required by API
+ userConfig = make(map[string]interface{})
+ }
+
+ _, err = client.ServiceIntegrations.Update(
+ ctx,
+ projectName,
+ integrationID,
+ aiven.UpdateServiceIntegrationRequest{
+ UserConfig: userConfig,
+ },
+ )
+ if err != nil {
+ return diag.Errorf("unable to update service integration: %s", err)
+ }
+ if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil {
+ return diag.Errorf("unable to wait for service integration to become active: %s", err)
+ }
+
+ return resourceServiceIntegrationRead(ctx, d, m)
+}
+
+func resourceServiceIntegrationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client := m.(*aiven.Client)
+
+ projectName, integrationID, err := schemautil.SplitResourceID2(d.Id())
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ err = client.ServiceIntegrations.Delete(ctx, projectName, integrationID)
+ if err != nil && !aiven.IsNotFound(err) {
+ return diag.Errorf("cannot delete service integration: %s", err)
+ }
+
+ return nil
+}
+
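+// resourceServiceIntegrationCheckForPreexistingResource returns an existing integration that matches
+// the configured type, source and destination services, or nil when none is found.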
+func resourceServiceIntegrationCheckForPreexistingResource(ctx context.Context, d *schema.ResourceData, m interface{}) (*aiven.ServiceIntegration, error) {
+ client := m.(*aiven.Client)
+
+ projectName := d.Get("project").(string)
+ integrationType := d.Get("integration_type").(string)
+ sourceServiceName := d.Get("source_service_name").(string)
+ destinationServiceName := d.Get("destination_service_name").(string)
+
+ integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName)
+ if err != nil && !aiven.IsNotFound(err) {
+ return nil, fmt.Errorf("unable to get list of service integrations: %s", err)
+ }
+
+ for i := range integrations {
+ integration := integrations[i]
+ if integration.SourceService == nil || integration.DestinationService == nil || integration.ServiceIntegrationID == "" {
+ continue
+ }
+
+ if integration.IntegrationType == integrationType &&
+ *integration.SourceService == sourceServiceName &&
+ *integration.DestinationService == destinationServiceName {
+ return integration, nil
+ }
+ }
+ return nil, nil
+}
+
+// nolint:staticcheck // TODO: Migrate to helper/retry package to avoid deprecated resource.StateRefreshFunc.
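+// resourceServiceIntegrationWaitUntilActive polls the integration until the API reports it active.
+// For kafka_connect integrations it additionally waits until connectors can be listed on the destination service.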
+func resourceServiceIntegrationWaitUntilActive(ctx context.Context, d *schema.ResourceData, m interface{}) error {
+ const (
+ active = "ACTIVE"
+ notActive = "NOTACTIVE"
+ )
+ client := m.(*aiven.Client)
+
+ projectName, integrationID, err := schemautil.SplitResourceID2(d.Id())
+ if err != nil {
+ return err
+ }
+
+ stateChangeConf := &resource.StateChangeConf{
+ Pending: []string{notActive},
+ Target: []string{active},
+ Refresh: func() (interface{}, string, error) {
+ log.Println("[DEBUG] Service Integration: waiting until active")
+
+ ii, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID)
+ if err != nil {
+				// Sometimes the Aiven API returns a 404 even when the service integration was created successfully
+ if aiven.IsNotFound(err) {
+ log.Println("[DEBUG] Service Integration: not yet found")
+ return nil, notActive, nil
+ }
+ return nil, "", err
+ }
+ if !ii.Active {
+ log.Println("[DEBUG] Service Integration: not yet active")
+ return nil, notActive, nil
+ }
+
+ if ii.IntegrationType == "kafka_connect" && ii.DestinationService != nil {
+ if _, err := client.KafkaConnectors.List(ctx, projectName, *ii.DestinationService); err != nil {
+ log.Println("[DEBUG] Service Integration: error listing kafka connectors: ", err)
+ return nil, notActive, nil
+ }
+ }
+ return ii, active, nil
+ },
+ Delay: 2 * time.Second,
+ Timeout: d.Timeout(schema.TimeoutCreate),
+ MinTimeout: 2 * time.Second,
+ ContinuousTargetOccurence: 10,
+ }
+ if _, err := stateChangeConf.WaitForStateContext(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
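+// resourceServiceIntegrationUserConfigFromSchemaToAPI converts the integration-type-specific
+// user config block from the Terraform schema into the user config map expected by the API.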
+func resourceServiceIntegrationUserConfigFromSchemaToAPI(d *schema.ResourceData) (map[string]interface{}, error) {
+ integrationType := d.Get("integration_type").(string)
+ return apiconvert.ToAPI(userconfig.IntegrationTypes, integrationType, d)
+}
+
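+// resourceServiceIntegrationCopyAPIResponseToTerraform writes the API response into Terraform state:
+// endpoint IDs (prefixed with the project name) or service names, the integration ID and type,
+// and the matching <integration_type>_user_config block.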
+func resourceServiceIntegrationCopyAPIResponseToTerraform(
+ d *schema.ResourceData,
+ integration *aiven.ServiceIntegration,
+ project string,
+) error {
+ if err := d.Set("project", project); err != nil {
+ return err
+ }
+
+ if integration.DestinationEndpointID != nil {
+ if err := d.Set("destination_endpoint_id", schemautil.BuildResourceID(project, *integration.DestinationEndpointID)); err != nil {
+ return err
+ }
+ } else if integration.DestinationService != nil {
+ if err := d.Set("destination_service_name", *integration.DestinationService); err != nil {
+ return err
+ }
+ }
+ if integration.SourceEndpointID != nil {
+ if err := d.Set("source_endpoint_id", schemautil.BuildResourceID(project, *integration.SourceEndpointID)); err != nil {
+ return err
+ }
+ } else if integration.SourceService != nil {
+ if err := d.Set("source_service_name", *integration.SourceService); err != nil {
+ return err
+ }
+ }
+ if err := d.Set("integration_id", integration.ServiceIntegrationID); err != nil {
+ return err
+ }
+ integrationType := integration.IntegrationType
+ if err := d.Set("integration_type", integrationType); err != nil {
+ return err
+ }
+
+ userConfig, err := apiconvert.FromAPI(userconfig.IntegrationTypes, integrationType, integration.UserConfig)
+ if err != nil {
+ return err
+ }
+
+ if len(userConfig) > 0 {
+ if err := d.Set(integrationType+"_user_config", userConfig); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go
new file mode 100644
index 000000000..014bcc8da
--- /dev/null
+++ b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go
@@ -0,0 +1,51 @@
+package serviceintegration
+
+import (
+ "context"
+
+ "github.com/aiven/aiven-go-client/v2"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+func DatasourceServiceIntegration() *schema.Resource {
+ return &schema.Resource{
+ ReadContext: datasourceServiceIntegrationRead,
+		Description: "The Service Integration data source provides information about an existing Aiven Service Integration.",
+ Schema: schemautil.ResourceSchemaAsDatasourceSchema(aivenServiceIntegrationSchema,
+ "project", "integration_type", "source_service_name", "destination_service_name"),
+ }
+}
+
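+// datasourceServiceIntegrationRead finds the integration matching the given project, integration type,
+// source and destination services, then delegates the rest of the read to the resource implementation.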
+func datasourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client := m.(*aiven.Client)
+
+ projectName := d.Get("project").(string)
+ integrationType := d.Get("integration_type").(string)
+ sourceServiceName := d.Get("source_service_name").(string)
+ destinationServiceName := d.Get("destination_service_name").(string)
+
+ integrations, err := client.ServiceIntegrations.List(ctx, projectName, sourceServiceName)
+ if err != nil {
+ return diag.Errorf("unable to list integrations for %s/%s: %s", projectName, sourceServiceName, err)
+ }
+
+ for _, i := range integrations {
+ if i.SourceService == nil || i.DestinationService == nil {
+ continue
+ }
+
+ if i.IntegrationType == integrationType &&
+ *i.SourceService == sourceServiceName &&
+ *i.DestinationService == destinationServiceName {
+
+ d.SetId(schemautil.BuildResourceID(projectName, i.ServiceIntegrationID))
+ return resourceServiceIntegrationRead(ctx, d, m)
+ }
+ }
+
+	return diag.Errorf("service integration %s/%s/%s/%s not found",
+		projectName, integrationType, sourceServiceName, destinationServiceName)
+}
diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_test.go b/internal/sdkprovider/service/serviceintegration/service_integration_test.go
index b7966fa94..8bcfa5a9a 100644
--- a/internal/sdkprovider/service/serviceintegration/service_integration_test.go
+++ b/internal/sdkprovider/service/serviceintegration/service_integration_test.go
@@ -25,7 +25,7 @@ func TestAccAivenServiceIntegration_should_fail(t *testing.T) {
{
Config: testAccServiceIntegrationShouldFailResource(),
PlanOnly: true,
- ExpectError: regexp.MustCompile("endpoint id should have the following"),
+ ExpectError: regexp.MustCompile("endpoint id should have the following format: project_name/endpoint_id"),
},
},
})
diff --git a/main.go b/main.go
index 952788618..2f4c890b4 100644
--- a/main.go
+++ b/main.go
@@ -12,7 +12,6 @@ import (
)
//go:generate go test -tags userconfig ./internal/schemautil/userconfig
-//go:generate go run ./ucgenerator/... --integrations clickhouse_kafka,clickhouse_postgresql,datadog,external_aws_cloudwatch_metrics,kafka_connect,kafka_logs,kafka_mirrormaker,logs,metrics
// registryPrefix is the registry prefix for the provider.
const registryPrefix = "registry.terraform.io/"
diff --git a/ucgenerator/main.go b/ucgenerator/main.go
deleted file mode 100644
index ef084010b..000000000
--- a/ucgenerator/main.go
+++ /dev/null
@@ -1,597 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "go/format"
- "log"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/aiven/go-api-schemas/pkg/dist"
- "github.com/dave/jennifer/jen"
- "golang.org/x/exp/slices"
- "golang.org/x/tools/imports"
- "gopkg.in/yaml.v3"
-)
-
-const (
- destPath = "./internal/plugin/service/userconfig/"
- localPrefix = "github.com/aiven/terraform-provider-aiven"
- importDiag = "github.com/hashicorp/terraform-plugin-framework/diag"
- importTypes = "github.com/hashicorp/terraform-plugin-framework/types"
- importAttr = "github.com/hashicorp/terraform-plugin-framework/attr"
- importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil"
- importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- importSetValidator = "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator"
- importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- codeGenerated = "Code generated by user config generator. DO NOT EDIT."
-)
-
-func main() {
- var serviceList, integrationList string
- flag.StringVar(&serviceList, "services", "", "Comma separated service list of names to generate for")
- flag.StringVar(&integrationList, "integrations", "", "Comma separated integrations list of names to generate for")
- flag.Parse()
-
- if serviceList+integrationList == "" {
- log.Fatal("--service or --integrations must be provided")
- }
-
- if serviceList != "" {
- err := generate("service", dist.ServiceTypes, strings.Split(serviceList, ","))
- if err != nil {
- log.Fatal(err)
- }
- }
-
- if integrationList != "" {
- err := generate("integration", dist.IntegrationTypes, strings.Split(integrationList, ","))
- if err != nil {
- log.Fatal(err)
- }
- }
-}
-
-func generate(kind string, data []byte, keys []string) error {
- var root map[string]*object
-
- err := yaml.Unmarshal(data, &root)
- if err != nil {
- return err
- }
-
- for key, o := range root {
- if !slices.Contains(keys, key) {
- continue
- }
-
- pkgName := strings.ReplaceAll(key, "_", "")
- o.isRoot = true
- o.init("UserConfig")
-
- // Generates file
- f := jen.NewFile(pkgName)
- f.HeaderComment(codeGenerated)
- f.ImportAlias(importResourceSchema, "resource")
- f.ImportAlias(importDatasourceSchema, "datasource")
- genAllForObject(f, o)
-
- // Sorts imports
- imports.LocalPrefix = localPrefix
- b, err := imports.Process("", []byte(f.GoString()), nil)
- if err != nil {
- return err
- }
-
- // Saves file
- dirPath := filepath.Join(destPath, kind, pkgName)
- err = os.MkdirAll(dirPath, os.ModePerm)
- if err != nil {
- return err
- }
-
- err = os.WriteFile(filepath.Join(dirPath, key+".go"), b, 0644)
- if err != nil {
- return err
- }
-
- testFile, err := genTestFile(pkgName, o)
- if err != nil {
- return err
- }
-
- testFileByte, err := format.Source([]byte(testFile))
- if err != nil {
- return err
- }
-
- err = os.WriteFile(filepath.Join(dirPath, key+"_test.go"), testFileByte, 0644)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func genAllForObject(f *jen.File, o *object) {
- genSchema(f, o, "Resource", importResourceSchema)
- genSchema(f, o, "DataSource", importDatasourceSchema)
- genTFObject(f, o)
- genDTOObject(f, o)
- genExpander(f, o)
- genFlattener(f, o)
- genAttrsMap(f, o)
-
- for _, p := range o.properties {
- if p.isNestedBlock() {
- if p.Type == objectTypeArray {
- genAllForObject(f, p.ArrayItems)
- } else {
- genAllForObject(f, p)
- }
- }
- }
-
- if !o.isRoot {
- return
- }
-
- // Exports handy public functions for root object only
- f.Op(`
-// Expand public function that converts tf object into dto
-func Expand(ctx context.Context, diags *diag.Diagnostics, set types.Set) *dtoUserConfig {
- return schemautil.ExpandSetBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, set)
-}
-
-// Flatten public function that converts dto into tf object
-func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.Set {
- o := new(dtoUserConfig)
- err := schemautil.MapToDTO(m, o)
- if err != nil {
- diags.AddError("failed to marshal map user config to dto", err.Error())
- return types.SetNull(types.ObjectType{AttrTypes: userConfigAttrs})
- }
- return schemautil.FlattenSetBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o)
-}
-`)
-}
-
-// genExpander creates function that unwraps TF object into json
-func genExpander(f *jen.File, o *object) {
- body := make([]jen.Code, 0)
- props := jen.Dict{}
- for _, p := range o.properties {
- var value *jen.Statement
- switch p.Type {
- case objectTypeObject:
- value = jen.Op(p.varName)
- v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call(
- jen.Id("ctx"),
- jen.Id("diags"),
- jen.Id("expand"+p.camelName),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(body, v, ifErr())
- case objectTypeArray:
- value = jen.Op(p.varName)
- if p.ArrayItems.Type == objectTypeObject {
- // It is a list of objects
- v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSetNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call(
- jen.Id("ctx"),
- jen.Id("diags"),
- jen.Id("expand"+p.camelName),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(body, v, ifErr())
- } else {
- // It is a list of scalars
- // We don't want pointer scalars here
- t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "")
- v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandSet").Types(jen.Id(t)).Call(
- jen.Id("ctx"),
- jen.Id("diags"),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(body, v, ifErr())
- }
- default:
- if p.Required {
- value = jen.Id("o").Dot(p.camelName).Dot(getTFTypeToValue(p)).Call()
- } else {
- // Own functions for casting values
- value = jen.Qual(importSchemautil, getTFTypeToValue(p)).Call(jen.Id("o").Dot(p.camelName))
- }
- }
-
- props[jen.Id(p.camelName)] = value
- }
-
- // Function body + return statement
- body = append(
- body,
- jen.Return(jen.Id("&"+o.dtoStructName).Values(props)),
- )
-
- funcName := "expand" + o.camelName
- f.Comment(funcName + " expands tf object into dto object")
- f.Func().Id(funcName).Params(
- jen.Id("ctx").Qual("context", "Context"),
- jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"),
- jen.Id("o").Op("*"+o.tfoStructName),
- ).Id("*" + o.dtoStructName).Block(body...)
-}
-
-// genFlattener creates function that unwraps json into TF object
-func genFlattener(f *jen.File, o *object) {
- body := make([]jen.Code, 0)
- props := jen.Dict{}
- for _, p := range o.properties {
- var value *jen.Statement
- switch p.Type {
- case objectTypeObject:
- value = jen.Op(p.varName)
- v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call(
- jen.Id("ctx"),
- jen.Id("diags"),
- jen.Id("flatten"+p.camelName),
- jen.Id(p.attrsName),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(body, v, ifErr())
- case objectTypeArray:
- value = jen.Op(p.varName)
- if p.ArrayItems.Type == objectTypeObject {
- // It is a list of objects
- v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenSetNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call(
- jen.Id("ctx"),
- jen.Id("diags"),
- jen.Id("flatten"+p.camelName),
- jen.Id(p.attrsName),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(body, v, ifErr())
- } else {
- //It is a list of scalars
- v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "SetValueFrom").Call(
- jen.Id("ctx"),
- jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"),
- jen.Id("o").Dot(p.camelName),
- )
- body = append(
- body,
- v,
- jen.Id("diags").Dot("Append").Call(jen.Id("d").Op("...")),
- ifErr(),
- )
- }
- default:
- value = jen.Qual(importTypes, getTFTypeFromValue(p)).Call(jen.Id("o").Dot(p.camelName))
- }
-
- if value == nil {
- continue
- }
-
- props[jen.Id(p.camelName)] = value
- }
-
- // Function body + return statement
- body = append(
- body,
- jen.Return(jen.Id("&"+o.tfoStructName).Values(props)),
- )
-
- funcName := "flatten" + o.camelName
- f.Comment(funcName + " flattens dto object into tf object")
- f.Func().Id(funcName).Params(
- jen.Id("ctx").Qual("context", "Context"),
- jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"),
- jen.Id("o").Op("*"+o.dtoStructName),
- ).Id("*" + o.tfoStructName).Block(body...)
-}
-
-// genAttrsMap creates attributes map for Flatten functions to "unwrap" response json into TF object
-func genAttrsMap(f *jen.File, o *object) {
- values := jen.Dict{}
- for _, p := range o.properties {
- key := jen.Lit(p.tfName)
- switch p.Type {
- case objectTypeArray, objectTypeObject:
- var v jen.Code
- if p.isNestedBlock() {
- v = jen.Qual(importTypes, "ObjectType").Values(jen.Dict{
- jen.Id("AttrTypes"): jen.Id(p.attrsName),
- })
- } else {
- v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type")
- }
- values[key] = jen.Qual(importTypes, "SetType").Values(jen.Dict{jen.Id("ElemType"): v})
- default:
- values[key] = jen.Qual(importTypes, getTFType(p)+"Type")
- }
- }
- f.Var().Id(o.attrsName).Op("=").Map(jen.String()).Qual(importAttr, "Type").Values(values)
-}
-
-// genTFObject creates TF object (for plan)
-func genTFObject(f *jen.File, o *object) {
- fields := make([]jen.Code, 0)
- for _, p := range o.properties {
- fields = append(fields, jen.Id(p.camelName).Qual(importTypes, getTFType(p)).Tag(map[string]string{"tfsdk": p.tfName}))
- }
- f.Comment(fmt.Sprintf("%s %s", o.tfoStructName, getDescription(o)))
- f.Type().Id(o.tfoStructName).Struct(fields...)
-}
-
-// genDTOObject creates DTO object to send over HTTP
-func genDTOObject(f *jen.File, o *object) {
- fields := make([]jen.Code, 0)
- for _, p := range o.properties {
- tags := map[string]string{"json": p.jsonName, "groups": "create"}
- if !p.Required {
- tags["json"] += ",omitempty"
- }
- if !p.CreateOnly {
- tags["groups"] += ",update"
- }
- fields = append(fields, jen.Id(p.camelName).Id(getDTOType(p)).Tag(tags))
- }
- f.Comment(o.dtoStructName + " request/response object")
- f.Type().Id(o.dtoStructName).Struct(fields...)
-}
-
-// genSchema generates TF schema. For root object only, i.e. RedisUserConfig
-func genSchema(f *jen.File, o *object, name, pkg string) {
- if !o.isRoot {
- return
- }
-
- funcName := fmt.Sprintf("New%sSchema", name)
- f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name)))
- f.Func().Id(funcName).Params().Qual(pkg, "SetNestedBlock").Block(
- jen.Return(getSchemaAttributes(o, pkg)),
- )
-}
-
-func getSchemaAttributes(o *object, pkg string) jen.Code {
- isResource := pkg == importResourceSchema
- blocks := jen.Dict{}
- attribs := jen.Dict{}
-
- // Array properties are its item properties
- properties := o.properties
- if o.Type == objectTypeArray {
- properties = o.ArrayItems.properties
- }
-
- for _, p := range properties {
- key := jen.Lit(p.tfName)
- if p.isNestedBlock() {
- blocks[key] = getSchemaAttributes(p, pkg)
- } else {
- // For scalars
- var value *jen.Statement
- switch p.Type {
- case objectTypeObject:
- // Schemaless map
- panic("schemaless objects are not supported")
- case objectTypeArray:
- value = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type")
- }
-
- values := getSchemaAttributeValues(p, isResource)
- values[jen.Id("ElementType")] = value
- attribs[jen.Lit(p.tfName)] = jen.Qual(pkg, getTFType(p)+"Attribute").Values(values)
- }
- }
-
- nested := jen.Dict{}
- if len(blocks) > 0 {
- nested[jen.Id("Blocks")] = jen.Map(jen.String()).Qual(pkg, "Block").Values(blocks)
- }
-
- if len(attribs) > 0 {
- nested[jen.Id("Attributes")] = jen.Map(jen.String()).Qual(pkg, "Attribute").Values(attribs)
- }
-
- values := getSchemaAttributeValues(o, isResource)
- values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested)
- return jen.Qual(pkg, "SetNestedBlock").Values(values)
-}
-
-func getSchemaAttributeValues(o *object, isResource bool) jen.Dict {
- a := jen.Dict{}
-
- if d := getDescription(o); d != "" {
- a[jen.Id("Description")] = jen.Lit(d)
- }
-
- if o.IsDeprecated {
- a[jen.Id("DeprecationMessage")] = jen.Lit(fmt.Sprintf("%q is deprecated", o.tfName))
- }
-
- validators := make([]jen.Code, 0)
- if o.MinItems != nil {
- validators = append(validators, valSizeAtLeast(*o.MinItems))
- }
-
- if o.MaxItems != nil {
- validators = append(validators, valSizeAtMost(*o.MaxItems))
- }
-
- if !o.isNestedBlock() {
- if !isResource {
- a[jen.Id("Computed")] = jen.True()
- } else {
- if o.Required {
- a[jen.Id("Required")] = jen.True()
- } else {
- a[jen.Id("Computed")] = jen.True()
- a[jen.Id("Optional")] = jen.True()
-
- if o.Default != nil {
- a[jen.Id("Default")] = getStaticDefault(o)
- }
- }
- }
- }
-
- if len(validators) > 0 {
- a[jen.Id("Validators")] = valValidatorSet(validators...)
- }
-
- return a
-}
-
-// getTFType matches generator types into plugin types
-func getTFType(o *object) string {
- switch o.Type {
- case objectTypeObject:
- if o.isNestedBlock() {
- return "Set"
- }
- return "Map"
- case objectTypeArray:
- return "Set"
- case objectTypeString:
- return "String"
- case objectTypeBoolean:
- return "Bool"
- case objectTypeInteger:
- return "Int64"
- case objectTypeNumber:
- return "Float64"
- }
- panic(fmt.Sprintf("Unknown type for %q", o.jsonName))
-}
-
-func getTFTypeToValue(o *object) string {
- v := getTFType(o)
- if !o.Required {
- return fmt.Sprintf("Value%sPointer", v)
- }
- return "Value" + v
-}
-
-func getTFTypeFromValue(o *object) string {
- v := getTFType(o)
- if !o.Required {
- return v + "PointerValue"
- }
- return v + "Value"
-}
-
-func getDTOType(o *object) string {
- optional := "*"
- if o.Required {
- optional = ""
- }
-
- switch o.Type {
- case objectTypeObject:
- return "*" + o.dtoStructName
- case objectTypeArray:
- t := "[]" + getDTOType(o.ArrayItems)
- if o.ArrayItems.Type == objectTypeObject {
- return t
- }
- // We don't want pointer scalars in slice
- return strings.ReplaceAll(t, "*", "")
- case objectTypeString:
- return optional + "string"
- case objectTypeBoolean:
- return optional + "bool"
- case objectTypeInteger:
- return optional + "int64"
- case objectTypeNumber:
- return optional + "float64"
- }
- panic(fmt.Sprintf("Unknown type for %q", o.jsonName))
-}
-
-// getStaticDefault returns "default" value for given field
-func getStaticDefault(o *object) *jen.Statement {
- var v *jen.Statement
- switch o.Type {
- case objectTypeString:
- v = jen.Lit(o.Default.(string))
- case objectTypeInteger:
- d, err := strconv.Atoi(o.Default.(string))
- if err != nil {
- return nil
- }
- v = jen.Lit(d)
- case objectTypeNumber:
- v = jen.Lit(o.Default.(float64))
- case objectTypeBoolean:
- v = jen.Lit(o.Default.(bool))
- default:
- return nil
- }
- d := getTFType(o)
- i := fmt.Sprintf("%s/%sdefault", importResourceSchema, strings.ToLower(d))
- return jen.Qual(i, "Static"+d).Call(v)
-}
-
-func getDescription(o *object) string {
- desc := make([]string, 0)
- d := o.Description
- if len(d) < len(o.Title) {
- d = o.Title
- }
-
- if d != "" {
- desc = append(desc, addDot(d))
- }
-
- if o.Default != nil && o.Type != objectTypeArray {
- desc = append(desc, fmt.Sprintf("The default value is `%v`.", o.Default))
- }
-
- // Trims dot from description, so it doesn't look weird with link to nested schema
- // Example: Databases to expose[dot] (see [below for nested schema]...)
- if len(desc) == 1 && o.isNestedBlock() {
- return strings.Trim(desc[0], ".")
- }
-
- return strings.Join(desc, " ")
-}
-
-func addDot(s string) string {
- if s != "" {
- switch s[len(s)-1:] {
- case ".", "!", "?":
- default:
- s += "."
- }
- }
- return s
-}
-
-func getValidator(name string, v any) *jen.Statement {
- return jen.Qual(importSetValidator, name).Call(jen.Lit(v))
-}
-
-func valSizeAtLeast(n int) *jen.Statement {
- return getValidator("SizeAtLeast", n)
-}
-
-func valSizeAtMost(n int) *jen.Statement {
- return getValidator("SizeAtMost", n)
-}
-
-func valValidatorSet(c ...jen.Code) *jen.Statement {
- return jen.Index().Qual(importValidator, "Set").Values(c...)
-}
-
-func ifErr() *jen.Statement {
- return jen.If(jen.Id("diags").Dot("HasError").Call()).Block(jen.Return(jen.Nil()))
-}
-
-func toPtr[T any](v T) *T {
- return &v
-}
diff --git a/ucgenerator/models.go b/ucgenerator/models.go
deleted file mode 100644
index 2b6942269..000000000
--- a/ucgenerator/models.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package main
-
-import (
- "strings"
-
- "github.com/stoewer/go-strcase"
- "golang.org/x/exp/slices"
-)
-
-type objectType string
-
-const (
- objectTypeObject objectType = "object"
- objectTypeArray objectType = "array"
- objectTypeString objectType = "string"
- objectTypeBoolean objectType = "boolean"
- objectTypeInteger objectType = "integer"
- objectTypeNumber objectType = "number"
-)
-
-type object struct {
- isRoot bool // top level object
- jsonName string // original name from json spec
- tfName string // terraform manifest field, unlike jsonName, can't store dot symbol
- tfoStructName string
- dtoStructName string
- camelName string
- varName string
- attrsName string
- properties []*object
- parent *object
-
- Type objectType `yaml:"-"`
- Required bool `yaml:"-"`
-
- IsDeprecated bool `yaml:"is_deprecated"`
- Default any `yaml:"default"`
- Enum []*struct {
- Value string `yaml:"value"`
- IsDeprecated bool `yaml:"is_deprecated"`
- } `yaml:"enum"`
- Pattern string `yaml:"pattern"`
- MinItems *int `yaml:"min_items"`
- MaxItems *int `yaml:"max_items"`
- MinLength *int `yaml:"min_length"`
- MaxLength *int `yaml:"max_length"`
- Minimum *float64 `yaml:"minimum"`
- Maximum *float64 `yaml:"maximum"`
- OrigType any `yaml:"type"`
- Format string `yaml:"format"`
- Title string `yaml:"title"`
- Description string `yaml:"description"`
- Properties map[string]*object `yaml:"properties"`
- ArrayItems *object `yaml:"items"`
- RequiredFields []string `yaml:"required"`
- CreateOnly bool `yaml:"create_only"`
- Nullable bool `yaml:"-"`
-}
-
-func (o *object) isNestedBlock() bool {
- switch o.Type {
- case objectTypeObject:
- return len(o.Properties) > 0
- case objectTypeArray:
- switch o.ArrayItems.Type {
- case objectTypeObject, objectTypeArray:
- return true
- }
- }
- return false
-}
-
-func (o *object) init(name string) {
- o.jsonName = name
- o.tfName = strings.ReplaceAll(name, ".", "__")
- o.camelName = toCamelCase(name)
-
- low := toLowerFirst(o.camelName)
- o.varName = low + "Var"
- o.attrsName = low + "Attrs"
- o.tfoStructName = "tfo" + o.camelName
- o.dtoStructName = "dto" + o.camelName
-
- // Sorts properties, so they keep order on each generation
- keys := make([]string, 0, len(o.Properties))
- for k := range o.Properties {
- keys = append(keys, k)
- }
- slices.Sort(keys)
- for _, k := range keys {
- o.properties = append(o.properties, o.Properties[k])
- }
-
- required := make(map[string]bool, len(o.RequiredFields))
- for _, k := range o.RequiredFields {
- required[k] = true
- }
-
- for _, k := range keys {
- child := o.Properties[k]
- child.parent = o
- child.Required = required[k]
- child.init(k)
- }
-
- // Types can be list of strings, or a string
- if v, ok := o.OrigType.(string); ok {
- o.Type = objectType(v)
- } else if v, ok := o.OrigType.([]interface{}); ok {
- o.Type = objectType(v[0].(string))
- for _, t := range v {
- switch s := t.(string); s {
- case "null":
- o.Nullable = true
- default:
- o.Type = objectType(s)
- }
- }
- }
-
- if o.Type == objectTypeArray {
- o.ArrayItems.parent = o
- o.ArrayItems.init(name)
- }
-
- // In terraform objects are lists of one item
- // Root item and properties should have max constraint
- if o.Type == objectTypeObject {
- if o.isRoot || o.parent != nil && o.parent.Type == objectTypeObject {
- o.MaxItems = toPtr(1)
- }
- }
-}
-
-// toCamelCase some fields has dots within, makes cleaner camelCase
-func toCamelCase(s string) string {
- return strcase.UpperCamelCase(strings.ReplaceAll(s, ".", "_"))
-}
-
-func toLowerFirst(s string) string {
- return strings.ToLower(s[0:1]) + s[1:]
-}
diff --git a/ucgenerator/tests.go b/ucgenerator/tests.go
deleted file mode 100644
index 2cffe73e5..000000000
--- a/ucgenerator/tests.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-// genJSONSample generates sample JSON for a test
-// If not allFields provided, creates a smaller json, which helps to test nil values (missing)
-func genJSONSample(b *strings.Builder, o *object, allFields bool) string {
- switch o.Type {
- case objectTypeObject:
- b.WriteString("{")
- for i, p := range o.properties {
- // Either field required or all fields printed
- if !(p.Required || allFields || !p.CreateOnly) {
- continue
- }
-
- b.WriteString(fmt.Sprintf("%q:", p.jsonName))
- genJSONSample(b, p, allFields)
- if i+1 != len(o.properties) {
- b.WriteString(",")
- }
- }
- b.WriteString("}")
- case objectTypeArray:
- b.WriteString("[")
- genJSONSample(b, o.ArrayItems, allFields)
- b.WriteString("]")
- case objectTypeString:
- b.WriteString(`"foo"`)
- case objectTypeBoolean:
- b.WriteString("true")
- case objectTypeInteger:
- b.WriteString("1")
- case objectTypeNumber:
- b.WriteString("1")
- }
- return b.String()
-}
-
-func genTestFile(pkg string, o *object) (string, error) {
- allFields, err := indentJSON(genJSONSample(new(strings.Builder), o, true))
- if err != nil {
- return "", err
- }
-
- updateOnlyFields, err := indentJSON(genJSONSample(new(strings.Builder), o, false))
- if err != nil {
- return "", err
- }
-
- file := fmt.Sprintf(
- testFile,
- codeGenerated,
- pkg,
- o.camelName,
- fmt.Sprintf("`%s`", allFields),
- fmt.Sprintf("`%s`", updateOnlyFields),
- )
-
- return strings.TrimSpace(file), nil
-}
-
-func indentJSON(s string) (string, error) {
- s = strings.ReplaceAll(s, ",}", "}") // fixes trailing comma when not all fields are generated
- m := make(map[string]any)
- err := json.Unmarshal([]byte(s), &m)
- if err != nil {
- return "", err
- }
-
- b, err := json.MarshalIndent(m, "", " ")
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
-
-const testFile = `
-// %[1]s
-
-package %[2]s
-
-import (
- "context"
- "encoding/json"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/stretchr/testify/require"
-
- "github.com/aiven/terraform-provider-aiven/internal/schemautil"
-)
-
-const allFields = %[4]s
-const updateOnlyFields = %[5]s
-
-func Test%[3]s(t *testing.T) {
- cases := []struct{
- name string
- source string
- expect string
- create bool
- }{
- {
- name: "fields to create resource",
- source: allFields,
- expect: allFields,
- create: true,
- },
- {
- name: "only fields to update resource",
- source: allFields,
- expect: updateOnlyFields, // usually, fewer fields
- create: false,
- },
- }
-
- ctx := context.Background()
- diags := new(diag.Diagnostics)
- for _, opt := range cases {
- t.Run(opt.name, func(t *testing.T) {
- dto := new(dto%[3]s)
- err := json.Unmarshal([]byte(opt.source), dto)
- require.NoError(t, err)
-
- // From json to TF
- tfo := flatten%[3]s(ctx, diags, dto)
- require.Empty(t, diags)
-
- // From TF to json
- config := expand%[3]s(ctx, diags, tfo)
- require.Empty(t, diags)
-
- // Run specific marshal (create or update resource)
- dtoConfig, err := schemautil.MarshalUserConfig(config, opt.create)
- require.NoError(t, err)
-
- // Compares that output is strictly equal to the input
- // If so, the flow is valid
- b, err := json.MarshalIndent(dtoConfig, "", " ")
- require.NoError(t, err)
- require.Empty(t, cmp.Diff(opt.expect, string(b)))
- })
- }
-}
-`