From bda0b12493a3f0df25ad120e4425595177308531 Mon Sep 17 00:00:00 2001 From: Murad Biashimov Date: Wed, 30 Aug 2023 15:23:55 +0200 Subject: [PATCH] chore(plugin): add new user config generator --- docs/data-sources/service_integration.md | 226 +++---- docs/resources/service_integration.md | 53 +- examples_tests/base.go | 2 + go.mod | 7 +- go.sum | 15 +- internal/plugin/provider.go | 3 + .../service/serviceintegration/models.go | 98 +++ .../service_integration_data_source.go | 140 ++++ .../service_integration_resource.go | 341 ++++++++++ .../service/serviceintegration/userconfig.go | 134 ++++ .../clickhousekafka/clickhouse_kafka.go | 408 ++++++++++++ .../clickhousekafka/clickhouse_kafka_test.go | 122 ++++ .../clickhouse_postgresql.go | 145 +++++ .../clickhouse_postgresql_test.go | 82 +++ .../userconfig/integration/datadog/datadog.go | 460 ++++++++++++++ .../integration/datadog/datadog_test.go | 132 ++++ .../external_aws_cloudwatch_metrics.go | 224 +++++++ .../external_aws_cloudwatch_metrics_test.go | 94 +++ .../integration/kafkaconnect/kafka_connect.go | 168 +++++ .../kafkaconnect/kafka_connect_test.go | 82 +++ .../integration/kafkalogs/kafka_logs.go | 114 ++++ .../integration/kafkalogs/kafka_logs_test.go | 78 +++ .../kafkamirrormaker/kafka_mirrormaker.go | 220 +++++++ .../kafka_mirrormaker_test.go | 88 +++ .../userconfig/integration/logs/logs.go | 133 ++++ .../userconfig/integration/logs/logs_test.go | 80 +++ .../userconfig/integration/metrics/metrics.go | 414 ++++++++++++ .../integration/metrics/metrics_test.go | 114 ++++ internal/plugin/util/schema.go | 16 +- internal/plugin/util/wait.go | 20 + internal/schemautil/plugin.go | 153 +++++ internal/sdkprovider/provider/provider.go | 3 +- .../service/kafkatopic/kafka_topic_cache.go | 2 - .../service_integration_test.go | 2 +- main.go | 1 + ucgenerator/main.go | 598 ++++++++++++++++++ ucgenerator/models.go | 142 +++++ ucgenerator/tests.go | 151 +++++ 38 files changed, 5110 insertions(+), 155 deletions(-) create mode 100644 internal/plugin/service/serviceintegration/models.go create mode 100644 internal/plugin/service/serviceintegration/service_integration_data_source.go create mode 100644 internal/plugin/service/serviceintegration/service_integration_resource.go create mode 100644 internal/plugin/service/serviceintegration/userconfig.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go create mode 100644 internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go create mode 100644 internal/plugin/service/userconfig/integration/datadog/datadog.go create mode 100644 internal/plugin/service/userconfig/integration/datadog/datadog_test.go create mode 100644 internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go create mode 100644 internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go create mode 100644 internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go create mode 100644 
internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go create mode 100644 internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go create mode 100644 internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go create mode 100644 internal/plugin/service/userconfig/integration/logs/logs.go create mode 100644 internal/plugin/service/userconfig/integration/logs/logs_test.go create mode 100644 internal/plugin/service/userconfig/integration/metrics/metrics.go create mode 100644 internal/plugin/service/userconfig/integration/metrics/metrics_test.go create mode 100644 internal/plugin/util/wait.go create mode 100644 internal/schemautil/plugin.go create mode 100644 ucgenerator/main.go create mode 100644 ucgenerator/models.go create mode 100644 ucgenerator/tests.go diff --git a/docs/data-sources/service_integration.md b/docs/data-sources/service_integration.md index 354e8152b..f3f35d1e6 100644 --- a/docs/data-sources/service_integration.md +++ b/docs/data-sources/service_integration.md @@ -30,248 +30,248 @@ data "aiven_service_integration" "myintegration" { ### Read-Only -- `clickhouse_kafka_user_config` (List of Object) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedatt--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (List of Object) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedatt--clickhouse_postgresql_user_config)) -- `datadog_user_config` (List of Object) Datadog user configurable settings (see [below for nested schema](#nestedatt--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (List of Object) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedatt--external_aws_cloudwatch_metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) - `id` (String) The ID of this resource. 
- `integration_id` (String) Service Integration Id at aiven -- `kafka_connect_user_config` (List of Object) KafkaConnect user configurable settings (see [below for nested schema](#nestedatt--kafka_connect_user_config)) -- `kafka_logs_user_config` (List of Object) KafkaLogs user configurable settings (see [below for nested schema](#nestedatt--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (List of Object) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedatt--kafka_mirrormaker_user_config)) -- `logs_user_config` (List of Object) Logs user configurable settings (see [below for nested schema](#nestedatt--logs_user_config)) -- `metrics_user_config` (List of Object) Metrics user configurable settings (see [below for nested schema](#nestedatt--metrics_user_config)) +- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - + ### Nested Schema for `clickhouse_kafka_user_config` Read-Only: -- `tables` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables)) +- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) - + ### Nested Schema for `clickhouse_kafka_user_config.tables` Read-Only: -- `auto_offset_reset` (String) -- `columns` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--columns)) -- `data_format` (String) -- `date_time_input_format` (String) -- `group_name` (String) -- `handle_error_mode` (String) -- `max_block_size` (Number) -- `max_rows_per_message` (Number) -- `name` (String) -- `num_consumers` (Number) -- `poll_max_batch_size` (Number) -- `skip_broken_messages` (Number) -- `topics` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_kafka_user_config--tables--topics)) - - +- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. +- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `data_format` (String) Message data format. The default value is `JSONEachRow`. +- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. +- `group_name` (String) Kafka consumers group. The default value is `clickhouse`. +- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. +- `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`. +- `max_rows_per_message` (Number) The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`. +- `name` (String) Name of the table. +- `num_consumers` (Number) The number of consumers per table per replica. 
The default value is `1`. +- `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. +- `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. +- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) + + ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` Read-Only: -- `name` (String) -- `type` (String) +- `name` (String) Column name. +- `type` (String) Column type. - + ### Nested Schema for `clickhouse_kafka_user_config.tables.topics` Read-Only: -- `name` (String) +- `name` (String) Name of the topic. - + ### Nested Schema for `clickhouse_postgresql_user_config` Read-Only: -- `databases` (List of Object) (see [below for nested schema](#nestedobjatt--clickhouse_postgresql_user_config--databases)) +- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) - + ### Nested Schema for `clickhouse_postgresql_user_config.databases` Read-Only: -- `database` (String) -- `schema` (String) +- `database` (String) PostgreSQL database to expose. The default value is `defaultdb`. +- `schema` (String) PostgreSQL schema to expose. The default value is `public`. - + ### Nested Schema for `datadog_user_config` Read-Only: -- `datadog_dbm_enabled` (Boolean) -- `datadog_tags` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--datadog_tags)) -- `exclude_consumer_groups` (List of String) -- `exclude_topics` (List of String) -- `include_consumer_groups` (List of String) -- `include_topics` (List of String) -- `kafka_custom_metrics` (List of String) -- `max_jmx_metrics` (Number) -- `opensearch` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--opensearch)) -- `redis` (List of Object) (see [below for nested schema](#nestedobjatt--datadog_user_config--redis)) - - +- `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. +- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `exclude_consumer_groups` (List of String) List of custom metrics. +- `exclude_topics` (List of String) List of topics to exclude. +- `include_consumer_groups` (List of String) List of custom metrics. +- `include_topics` (List of String) List of topics to include. +- `kafka_custom_metrics` (List of String) List of custom metrics. +- `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. +- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) + + ### Nested Schema for `datadog_user_config.datadog_tags` Read-Only: -- `comment` (String) -- `tag` (String) +- `comment` (String) Optional tag explanation. +- `tag` (String) Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven. - + ### Nested Schema for `datadog_user_config.opensearch` Read-Only: -- `index_stats_enabled` (Boolean) -- `pending_task_stats_enabled` (Boolean) -- `pshard_stats_enabled` (Boolean) +- `index_stats_enabled` (Boolean) Enable Datadog Opensearch Index Monitoring. 
+- `pending_task_stats_enabled` (Boolean) Enable Datadog Opensearch Pending Task Monitoring. +- `pshard_stats_enabled` (Boolean) Enable Datadog Opensearch Primary Shard Monitoring. - + ### Nested Schema for `datadog_user_config.redis` Read-Only: -- `command_stats_enabled` (Boolean) +- `command_stats_enabled` (Boolean) Enable command_stats option in the agent's configuration. The default value is `false`. - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config` Read-Only: -- `dropped_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (List of Object) (see [below for nested schema](#nestedobjatt--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` Read-Only: -- `field` (String) -- `metric` (String) +- `field` (String) Identifier of a value in the metric. +- `metric` (String) Identifier of the metric. - + ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.extra_metrics` Read-Only: -- `field` (String) -- `metric` (String) +- `field` (String) Identifier of a value in the metric. +- `metric` (String) Identifier of the metric. - + ### Nested Schema for `kafka_connect_user_config` Read-Only: -- `kafka_connect` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) - + ### Nested Schema for `kafka_connect_user_config.kafka_connect` Read-Only: -- `config_storage_topic` (String) -- `group_id` (String) -- `offset_storage_topic` (String) -- `status_storage_topic` (String) +- `config_storage_topic` (String) The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id. +- `group_id` (String) A unique string that identifies the Connect cluster group this worker belongs to. +- `offset_storage_topic` (String) The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id. +- `status_storage_topic` (String) The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id. - + ### Nested Schema for `kafka_logs_user_config` Read-Only: -- `kafka_topic` (String) -- `selected_log_fields` (List of String) +- `kafka_topic` (String) Topic name. +- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. 
- + ### Nested Schema for `kafka_mirrormaker_user_config` Read-Only: -- `cluster_alias` (String) -- `kafka_mirrormaker` (List of Object) (see [below for nested schema](#nestedobjatt--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. +- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) - + ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` Read-Only: -- `consumer_fetch_min_bytes` (Number) -- `producer_batch_size` (Number) -- `producer_buffer_memory` (Number) -- `producer_compression_type` (String) -- `producer_linger_ms` (Number) -- `producer_max_request_size` (Number) +- `consumer_fetch_min_bytes` (Number) The minimum amount of data the server should return for a fetch request. +- `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker. +- `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker. +- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression. +- `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing. +- `producer_max_request_size` (Number) The maximum request size in bytes. - + ### Nested Schema for `logs_user_config` Read-Only: -- `elasticsearch_index_days_max` (Number) -- `elasticsearch_index_prefix` (String) -- `selected_log_fields` (List of String) +- `elasticsearch_index_days_max` (Number) Elasticsearch index retention limit. The default value is `3`. +- `elasticsearch_index_prefix` (String) Elasticsearch index prefix. The default value is `logs`. +- `selected_log_fields` (List of String) The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent. - + ### Nested Schema for `metrics_user_config` Read-Only: -- `database` (String) -- `retention_days` (Number) -- `ro_username` (String) -- `source_mysql` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql)) -- `username` (String) +- `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. +- `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. +- `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. +- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `username` (String) Name of the user used to write metrics. 
Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - + ### Nested Schema for `metrics_user_config.source_mysql` Read-Only: -- `telegraf` (List of Object) (see [below for nested schema](#nestedobjatt--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) - + ### Nested Schema for `metrics_user_config.source_mysql.telegraf` Read-Only: -- `gather_event_waits` (Boolean) -- `gather_file_events_stats` (Boolean) -- `gather_index_io_waits` (Boolean) -- `gather_info_schema_auto_inc` (Boolean) -- `gather_innodb_metrics` (Boolean) -- `gather_perf_events_statements` (Boolean) -- `gather_process_list` (Boolean) -- `gather_slave_status` (Boolean) -- `gather_table_io_waits` (Boolean) -- `gather_table_lock_waits` (Boolean) -- `gather_table_schema` (Boolean) -- `perf_events_statements_digest_text_limit` (Number) -- `perf_events_statements_limit` (Number) -- `perf_events_statements_time_limit` (Number) +- `gather_event_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS. +- `gather_file_events_stats` (Boolean) gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME. +- `gather_index_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE. +- `gather_info_schema_auto_inc` (Boolean) Gather auto_increment columns and max values from information schema. +- `gather_innodb_metrics` (Boolean) Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS. +- `gather_perf_events_statements` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST. +- `gather_process_list` (Boolean) Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST. +- `gather_slave_status` (Boolean) Gather metrics from SHOW SLAVE STATUS command output. +- `gather_table_io_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE. +- `gather_table_lock_waits` (Boolean) Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS. +- `gather_table_schema` (Boolean) Gather metrics from INFORMATION_SCHEMA.TABLES. +- `perf_events_statements_digest_text_limit` (Number) Truncates digest text from perf_events_statements into this many characters. +- `perf_events_statements_limit` (Number) Limits metrics from perf_events_statements. +- `perf_events_statements_time_limit` (Number) Only include perf_events_statements whose last seen is less than this many seconds. 
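For reference, a minimal sketch of how the regenerated data source schema above can be consumed. This is illustrative only: the project and service names are placeholders, and the `[0]` index assumes the integration actually carries a `metrics_user_config` block.

```hcl
# Illustrative only: read an existing metrics integration and expose one of
# the generated nested user config attributes documented above.
data "aiven_service_integration" "metrics" {
  project                  = "my-project"
  integration_type         = "metrics"
  source_service_name      = "my-kafka"
  destination_service_name = "my-pg"
}

output "metrics_retention_days" {
  # Assumes the integration was created with a metrics_user_config block.
  value = data.aiven_service_integration.metrics.metrics_user_config[0].retention_days
}
```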
diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index e8bef4aed..9218f0576 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -33,17 +33,17 @@ resource "aiven_service_integration" "my_integration_metrics" { ### Optional -- `clickhouse_kafka_user_config` (Block List, Max: 1) ClickhouseKafka user configurable settings (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) -- `clickhouse_postgresql_user_config` (Block List, Max: 1) ClickhousePostgresql user configurable settings (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) -- `datadog_user_config` (Block List, Max: 1) Datadog user configurable settings (see [below for nested schema](#nestedblock--datadog_user_config)) +- `clickhouse_kafka_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config)) +- `clickhouse_postgresql_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config)) +- `datadog_user_config` (Block List) (see [below for nested schema](#nestedblock--datadog_user_config)) - `destination_endpoint_id` (String) Destination endpoint for the integration (if any) - `destination_service_name` (String) Destination service for the integration (if any) -- `external_aws_cloudwatch_metrics_user_config` (Block List, Max: 1) ExternalAwsCloudwatchMetrics user configurable settings (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) -- `kafka_connect_user_config` (Block List, Max: 1) KafkaConnect user configurable settings (see [below for nested schema](#nestedblock--kafka_connect_user_config)) -- `kafka_logs_user_config` (Block List, Max: 1) KafkaLogs user configurable settings (see [below for nested schema](#nestedblock--kafka_logs_user_config)) -- `kafka_mirrormaker_user_config` (Block List, Max: 1) KafkaMirrormaker user configurable settings (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) -- `logs_user_config` (Block List, Max: 1) Logs user configurable settings (see [below for nested schema](#nestedblock--logs_user_config)) -- `metrics_user_config` (Block List, Max: 1) Metrics user configurable settings (see [below for nested schema](#nestedblock--metrics_user_config)) +- `external_aws_cloudwatch_metrics_user_config` (Block List) External AWS CloudWatch Metrics integration user config (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config)) +- `kafka_connect_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_connect_user_config)) +- `kafka_logs_user_config` (Block List) (see [below for nested schema](#nestedblock--kafka_logs_user_config)) +- `kafka_mirrormaker_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config)) +- `logs_user_config` (Block List) (see [below for nested schema](#nestedblock--logs_user_config)) +- `metrics_user_config` (Block List) Integration user config (see [below for nested schema](#nestedblock--metrics_user_config)) - `source_endpoint_id` (String) Source endpoint for the integration (if any) - `source_service_name` (String) Source service for the integration (if any) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) @@ -58,7 +58,7 @@ resource "aiven_service_integration" 
"my_integration_metrics" { Optional: -- `tables` (Block List, Max: 100) Tables to create. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) +- `tables` (Block List) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables)) ### Nested Schema for `clickhouse_kafka_user_config.tables` @@ -72,7 +72,7 @@ Required: Optional: - `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`. -- `columns` (Block List, Max: 100) Table columns. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) +- `columns` (Block List) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) - `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`. - `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`. @@ -80,7 +80,7 @@ Optional: - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`. - `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`. - `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`. -- `topics` (Block List, Max: 100) Kafka topics. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) +- `topics` (Block List) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) ### Nested Schema for `clickhouse_kafka_user_config.tables.columns` @@ -106,7 +106,7 @@ Required: Optional: -- `databases` (Block List, Max: 10) Databases to expose. (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) +- `databases` (Block List) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases)) ### Nested Schema for `clickhouse_postgresql_user_config.databases` @@ -124,15 +124,15 @@ Optional: Optional: - `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring. -- `datadog_tags` (Block List, Max: 32) Custom tags provided by user. (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) +- `datadog_tags` (Block List) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags)) - `exclude_consumer_groups` (List of String) List of custom metrics. - `exclude_topics` (List of String) List of topics to exclude. - `include_consumer_groups` (List of String) List of custom metrics. - `include_topics` (List of String) List of topics to include. - `kafka_custom_metrics` (List of String) List of custom metrics. - `max_jmx_metrics` (Number) Maximum number of JMX metrics to send. -- `opensearch` (Block List, Max: 1) Datadog Opensearch Options. (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) -- `redis` (Block List, Max: 1) Datadog Redis Options. 
(see [below for nested schema](#nestedblock--datadog_user_config--redis)) +- `opensearch` (Block List) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch)) +- `redis` (Block List) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis)) ### Nested Schema for `datadog_user_config.datadog_tags` @@ -170,8 +170,8 @@ Optional: Optional: -- `dropped_metrics` (Block List, Max: 1024) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) -- `extra_metrics` (Block List, Max: 1024) Metrics to allow through to AWS CloudWatch (in addition to default metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) +- `dropped_metrics` (Block List) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics)) +- `extra_metrics` (Block List) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics)) ### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics` @@ -197,7 +197,7 @@ Required: Optional: -- `kafka_connect` (Block List, Max: 1) Kafka Connect service configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) +- `kafka_connect` (Block List) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect)) ### Nested Schema for `kafka_connect_user_config.kafka_connect` @@ -229,7 +229,7 @@ Optional: Optional: - `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'. -- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) +- `kafka_mirrormaker` (Block List) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker)) ### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker` @@ -263,7 +263,7 @@ Optional: - `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. - `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days. - `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. -- `source_mysql` (Block List, Max: 1) Configuration options for metrics where source service is MySQL. 
(see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) +- `source_mysql` (Block List) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql)) - `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service. @@ -271,7 +271,7 @@ Optional: Optional: -- `telegraf` (Block List, Max: 1) Configuration options for Telegraf MySQL input plugin. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) +- `telegraf` (Block List) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf)) ### Nested Schema for `metrics_user_config.source_mysql.telegraf` @@ -301,11 +301,10 @@ Optional: Optional: -- `create` (String) -- `default` (String) -- `delete` (String) -- `read` (String) -- `update` (String) +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). ## Import Import is supported using the following syntax: ```shell diff --git a/examples_tests/base.go b/examples_tests/base.go index fa5144417..02a726b7f 100644 --- a/examples_tests/base.go +++ b/examples_tests/base.go @@ -64,6 +64,8 @@ func (s *BaseTestSuite) TearDownSuite() { } // withDefaults adds default options for terraform test +// +//lint:ignore U1000 Ignore unused function. 
Used in child structs func (s *BaseTestSuite) withDefaults(opts *terraform.Options) *terraform.Options { // No need to use lock file for dev build opts.Lock = false diff --git a/go.mod b/go.mod index a2fd9831f..c87f0ea8f 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,10 @@ go 1.19 require ( github.com/aiven/aiven-go-client v1.36.0 + github.com/avast/retry-go v3.0.0+incompatible github.com/dave/jennifer v1.7.0 github.com/docker/go-units v0.5.0 + github.com/emicklei/go-restful/v3 v3.9.0 github.com/ettle/strcase v0.1.1 github.com/google/go-cmp v0.5.9 github.com/gruntwork-io/terratest v0.43.13 @@ -15,9 +17,12 @@ require ( github.com/hashicorp/terraform-plugin-mux v0.11.2 github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/liip/sheriff v0.11.1 + github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.8.4 golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 golang.org/x/sync v0.3.0 + golang.org/x/tools v0.6.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -70,7 +75,7 @@ require ( github.com/hashicorp/terraform-registry-address v0.2.1 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect - github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect + github.com/jinzhu/copier v0.4.0 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.11 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/go.sum b/go.sum index b642b0806..1741c281b 100644 --- a/go.sum +++ b/go.sum @@ -206,6 +206,8 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= @@ -239,6 +241,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -391,6 +395,7 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 
h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v0.0.0-20161031182605-e96d38404026/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -433,8 +438,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= -github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -457,6 +462,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liip/sheriff v0.11.1 h1:52YGzskXFPSEnwfEtXnbPiMKKXJGm5IP45s8Ogw0Wyk= +github.com/liip/sheriff v0.11.1/go.mod h1:nVTQYHxfdIfOHnk5FREt4j6cnaSlJPUfXFVORfgGmTo= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -501,6 +508,8 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -835,6 +844,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/plugin/provider.go b/internal/plugin/provider.go index 435879e7f..9a64a061d 100644 --- a/internal/plugin/provider.go +++ b/internal/plugin/provider.go @@ -14,6 +14,7 @@ import ( "github.com/aiven/terraform-provider-aiven/internal/common" "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" "github.com/aiven/terraform-provider-aiven/internal/plugin/service/organization" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/serviceintegration" ) // AivenProvider is the provider implementation for Aiven. @@ -110,6 +111,7 @@ func (p *AivenProvider) Configure( func (p *AivenProvider) Resources(context.Context) []func() resource.Resource { return []func() resource.Resource{ organization.NewOrganizationResource, + serviceintegration.NewServiceIntegrationResource, } } @@ -117,6 +119,7 @@ func (p *AivenProvider) Resources(context.Context) []func() resource.Resource { func (p *AivenProvider) DataSources(context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ organization.NewOrganizationDataSource, + serviceintegration.NewServiceIntegrationDataSource, } } diff --git a/internal/plugin/service/serviceintegration/models.go b/internal/plugin/service/serviceintegration/models.go new file mode 100644 index 000000000..0660c95e7 --- /dev/null +++ b/internal/plugin/service/serviceintegration/models.go @@ -0,0 +1,98 @@ +package serviceintegration + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const ( + idProjectIndex = 0 + idIntegrationIDIndex = 1 +) + +// Plugin framework doesn't support embedded structs +// https://github.com/hashicorp/terraform-plugin-framework/issues/242 +// We use resourceModel as base model, and copy state to/from dataSourceModel for datasource +type resourceModel struct { + Timeouts timeouts.Value `tfsdk:"timeouts"` + ID types.String `tfsdk:"id" copier:"ID"` + Project types.String `tfsdk:"project" copier:"Project"` + IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"` + DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"` + DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"` + IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` + SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` + SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` + ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.List 
`tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` +} + +type dataSourceModel struct { + ID types.String `tfsdk:"id" copier:"ID"` + Project types.String `tfsdk:"project" copier:"Project"` + IntegrationID types.String `tfsdk:"integration_id" copier:"IntegrationID"` + DestinationEndpointID types.String `tfsdk:"destination_endpoint_id" copier:"DestinationEndpointID"` + DestinationServiceName types.String `tfsdk:"destination_service_name" copier:"DestinationServiceName"` + IntegrationType types.String `tfsdk:"integration_type" copier:"IntegrationType"` + SourceEndpointID types.String `tfsdk:"source_endpoint_id" copier:"SourceEndpointID"` + SourceServiceName types.String `tfsdk:"source_service_name" copier:"SourceServiceName"` + ClickhouseKafkaUserConfig types.List `tfsdk:"clickhouse_kafka_user_config" copier:"ClickhouseKafkaUserConfig"` + ClickhousePostgresqlUserConfig types.List `tfsdk:"clickhouse_postgresql_user_config" copier:"ClickhousePostgresqlUserConfig"` + DatadogUserConfig types.List `tfsdk:"datadog_user_config" copier:"DatadogUserConfig"` + ExternalAwsCloudwatchMetricsUserConfig types.List `tfsdk:"external_aws_cloudwatch_metrics_user_config" copier:"ExternalAwsCloudwatchMetricsUserConfig"` + KafkaConnectUserConfig types.List `tfsdk:"kafka_connect_user_config" copier:"KafkaConnectUserConfig"` + KafkaLogsUserConfig types.List `tfsdk:"kafka_logs_user_config" copier:"KafkaLogsUserConfig"` + KafkaMirrormakerUserConfig types.List `tfsdk:"kafka_mirrormaker_user_config" copier:"KafkaMirrormakerUserConfig"` + LogsUserConfig types.List `tfsdk:"logs_user_config" copier:"LogsUserConfig"` + MetricsUserConfig types.List `tfsdk:"metrics_user_config" copier:"MetricsUserConfig"` +} + +func (p *resourceModel) getID() string { + i := p.IntegrationID.ValueString() + if i != "" { + return i + } + return getIDIndex(p.ID.ValueString(), idIntegrationIDIndex) +} + +func (p *resourceModel) getProject() string { + project := p.Project.ValueString() + if project != "" { + return project + } + return getIDIndex(p.ID.ValueString(), idProjectIndex) +} + +func newEndpointID(project string, s *string) types.String { + if s != nil { + v := fmt.Sprintf("%s/%s", project, *s) + s = &v + } + return types.StringPointerValue(s) +} + +func getIDIndex(s string, i int) string { + list := strings.Split(s, "/") + if len(list)-1 == i { + return list[i] + } + return "" +} + +func getEndpointIDPointer(s string) *string { + id := getIDIndex(s, idIntegrationIDIndex) + if s == "" { + return nil + } + return &id +} diff --git a/internal/plugin/service/serviceintegration/service_integration_data_source.go b/internal/plugin/service/serviceintegration/service_integration_data_source.go new file mode 100644 index 000000000..ba60bba46 --- /dev/null +++ b/internal/plugin/service/serviceintegration/service_integration_data_source.go @@ -0,0 +1,140 @@ +package serviceintegration + +import ( + "context" + + "github.com/aiven/aiven-go-client" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + 
"github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/jinzhu/copier" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +var ( + _ datasource.DataSource = &serviceIntegrationDataSource{} + _ datasource.DataSourceWithConfigure = &serviceIntegrationDataSource{} +) + +func NewServiceIntegrationDataSource() datasource.DataSource { + return &serviceIntegrationDataSource{} +} + +type serviceIntegrationDataSource struct { + client *aiven.Client +} + +func (s *serviceIntegrationDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, _ *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + s.client = req.ProviderData.(*aiven.Client) +} + +func (s *serviceIntegrationDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "aiven_service_integration" +} + +func (s *serviceIntegrationDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "The Service Integration data source provides information about the existing Aiven Service Integration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "integration_id": schema.StringAttribute{ + Description: "Service Integration Id at aiven", + Computed: true, + }, + "destination_endpoint_id": schema.StringAttribute{ + Description: "Destination endpoint for the integration (if any)", + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "destination_service_name": schema.StringAttribute{ + Description: "Destination service for the integration (if any)", + Required: true, + }, + "integration_type": schema.StringAttribute{ + Description: "Type of the service integration. 
Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"), + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(integrationTypes...), + }, + }, + "project": schema.StringAttribute{ + Description: "Project the integration belongs to", + Required: true, + }, + "source_endpoint_id": schema.StringAttribute{ + Description: "Source endpoint for the integration (if any)", + Computed: true, + Validators: []validator.String{endpointIDValidator}, + }, + "source_service_name": schema.StringAttribute{ + Description: "Source service for the integration (if any)", + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "clickhouse_kafka_user_config": clickhousekafka.NewDataSourceSchema(), + "clickhouse_postgresql_user_config": clickhousepostgresql.NewDataSourceSchema(), + "datadog_user_config": datadog.NewDataSourceSchema(), + "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewDataSourceSchema(), + "kafka_connect_user_config": kafkaconnect.NewDataSourceSchema(), + "kafka_logs_user_config": kafkalogs.NewDataSourceSchema(), + "kafka_mirrormaker_user_config": kafkamirrormaker.NewDataSourceSchema(), + "logs_user_config": logs.NewDataSourceSchema(), + "metrics_user_config": metrics.NewDataSourceSchema(), + }, + } +} + +// Read reads datasource +// All functions adapted for resourceModel, so we use it as donor +// Copies state from datasource to resource, then back, when things are done +func (s *serviceIntegrationDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var o dataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &o)...) + if resp.Diagnostics.HasError() { + return + } + + var res resourceModel + err := copier.Copy(&res, &o) + if err != nil { + resp.Diagnostics.AddError("data config copy error", err.Error()) + } + + dto, err := getSIByName(ctx, s.client, &res) + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorReadingResource, err.Error()) + return + } + + loadFromDTO(ctx, &resp.Diagnostics, &res, dto) + if resp.Diagnostics.HasError() { + return + } + + err = copier.Copy(&o, &res) + if err != nil { + resp.Diagnostics.AddError("dto copy error", err.Error()) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, o)...) 
+} diff --git a/internal/plugin/service/serviceintegration/service_integration_resource.go b/internal/plugin/service/serviceintegration/service_integration_resource.go new file mode 100644 index 000000000..b1353bedd --- /dev/null +++ b/internal/plugin/service/serviceintegration/service_integration_resource.go @@ -0,0 +1,341 @@ +package serviceintegration + +import ( + "context" + "fmt" + "regexp" + + "github.com/aiven/aiven-go-client" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/errmsg" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/util" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +var endpointIDValidator = stringvalidator.RegexMatches( + regexp.MustCompile(`^[a-zA-Z0-9_-]*/[a-zA-Z0-9_-]*$`), + "endpoint id should have the following format: project_name/endpoint_id", +) + +var ( + _ resource.Resource = &serviceIntegrationResource{} + _ resource.ResourceWithConfigure = &serviceIntegrationResource{} + _ resource.ResourceWithImportState = &serviceIntegrationResource{} +) + +func NewServiceIntegrationResource() resource.Resource { + return &serviceIntegrationResource{} +} + +type serviceIntegrationResource struct { + client *aiven.Client +} + +func (s *serviceIntegrationResource) Configure(_ context.Context, req resource.ConfigureRequest, _ *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + s.client = req.ProviderData.(*aiven.Client) +} + +func (s *serviceIntegrationResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aiven_service_integration" +} + +func (s *serviceIntegrationResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = util.GeneralizeSchema(ctx, schema.Schema{ + Description: "The Service Integration resource allows the creation and management of Aiven Service 
Integrations.",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ Validators: []validator.String{endpointIDValidator},
+ },
+ "integration_id": schema.StringAttribute{
+ Description: "Service Integration ID at Aiven",
+ Computed: true,
+ },
+ "destination_endpoint_id": schema.StringAttribute{
+ Description: "Destination endpoint for the integration (if any)",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Optional: true,
+ Validators: []validator.String{
+ endpointIDValidator,
+ stringvalidator.ExactlyOneOf(
+ path.MatchRoot("destination_endpoint_id"),
+ path.MatchRoot("destination_service_name"),
+ ),
+ },
+ },
+ "destination_service_name": schema.StringAttribute{
+ Description: "Destination service for the integration (if any)",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Optional: true,
+ },
+ "integration_type": schema.StringAttribute{
+ Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"),
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf(integrationTypes...),
+ },
+ },
+ "project": schema.StringAttribute{
+ Description: "Project the integration belongs to",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Required: true,
+ },
+ "source_endpoint_id": schema.StringAttribute{
+ Description: "Source endpoint for the integration (if any)",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Optional: true,
+ Validators: []validator.String{
+ endpointIDValidator,
+ stringvalidator.ExactlyOneOf(
+ path.MatchRoot("source_endpoint_id"),
+ path.MatchRoot("source_service_name"),
+ ),
+ },
+ },
+ "source_service_name": schema.StringAttribute{
+ Description: "Source service for the integration (if any)",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Optional: true,
+ },
+ },
+ Blocks: map[string]schema.Block{
+ "clickhouse_kafka_user_config": clickhousekafka.NewResourceSchema(),
+ "clickhouse_postgresql_user_config": clickhousepostgresql.NewResourceSchema(),
+ "datadog_user_config": datadog.NewResourceSchema(),
+ "external_aws_cloudwatch_metrics_user_config": externalawscloudwatchmetrics.NewResourceSchema(),
+ "kafka_connect_user_config": kafkaconnect.NewResourceSchema(),
+ "kafka_logs_user_config": kafkalogs.NewResourceSchema(),
+ "kafka_mirrormaker_user_config": kafkamirrormaker.NewResourceSchema(),
+ "logs_user_config": logs.NewResourceSchema(),
+ "metrics_user_config": metrics.NewResourceSchema(),
+ },
+ })
+}
+
+func (s *serviceIntegrationResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ var o resourceModel
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // read_replicas can only be created alongside the service. Also, the only way to promote a replica
+ // is to delete the service integration that was created, so we should make that as painless as possible.
+ // For now, we seamlessly import preexisting 'read_replica' service integrations on resource create;
+ // all other integrations should be imported using `terraform import`.
+ if o.IntegrationType.ValueString() == readReplicaType {
+ if preexisting, err := getSIByName(ctx, s.client, &o); err != nil {
+ resp.Diagnostics.AddError("unable to search for possible preexisting 'read_replica' service integration", err.Error())
+ return
+ } else if preexisting != nil {
+ o.IntegrationID = types.StringValue(preexisting.ServiceIntegrationID)
+ s.read(ctx, &resp.Diagnostics, &resp.State, &o)
+ return
+ }
+ }
+
+ userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, true)
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to expand user config", err.Error())
+ return
+ }
+ createReq := aiven.CreateServiceIntegrationRequest{
+ DestinationEndpointID: getEndpointIDPointer(o.DestinationEndpointID.ValueString()),
+ DestinationService: o.DestinationServiceName.ValueStringPointer(),
+ IntegrationType: o.IntegrationType.ValueString(),
+ SourceEndpointID: getEndpointIDPointer(o.SourceEndpointID.ValueString()),
+ SourceService: o.SourceServiceName.ValueStringPointer(),
+ UserConfig: userConfig,
+ }
+
+ dto, err := s.client.ServiceIntegrations.Create(
+ o.Project.ValueString(),
+ createReq,
+ )
+
+ if err != nil {
+ resp.Diagnostics.AddError(errmsg.SummaryErrorCreatingResource, err.Error())
+ return
+ }
+
+ o.IntegrationID = types.StringValue(dto.ServiceIntegrationID)
+ s.read(ctx, &resp.Diagnostics, &resp.State, &o)
+}
+
+func (s *serviceIntegrationResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ var o resourceModel
+ resp.Diagnostics.Append(req.State.Get(ctx, &o)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ s.read(ctx, &resp.Diagnostics, &resp.State, &o)
+}
+
+func (s *serviceIntegrationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ var state resourceModel
+ resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var o resourceModel
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &o)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Copies ID from the state
+ o.IntegrationID = state.IntegrationID
+ userConfig, err := expandUserConfig(ctx, &resp.Diagnostics, &o, false)
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to expand user config", err.Error())
+ return
+ }
+
+ _, err = s.client.ServiceIntegrations.Update(
+ state.Project.ValueString(),
+ state.IntegrationID.ValueString(),
+ aiven.UpdateServiceIntegrationRequest{
+ UserConfig: userConfig,
+ },
+ )
+
+ if err != nil {
+ resp.Diagnostics.AddError(errmsg.SummaryErrorUpdatingResource, err.Error())
+ return
+ }
+
+ s.read(ctx, &resp.Diagnostics, &resp.State, &o)
+}
+
+func (s *serviceIntegrationResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ var o resourceModel
+ resp.Diagnostics.Append(req.State.Get(ctx, &o)...)
+ if resp.Diagnostics.HasError() { + return + } + + err := s.client.ServiceIntegrations.Delete(o.Project.ValueString(), o.IntegrationID.ValueString()) + if err != nil { + resp.Diagnostics.AddError(errmsg.SummaryErrorDeletingResource, err.Error()) + } +} + +func (s *serviceIntegrationResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// read reads from API and saves to state +func (s *serviceIntegrationResource) read(ctx context.Context, diags *diag.Diagnostics, state *tfsdk.State, o *resourceModel) { + dto, err := getSIByID(ctx, s.client, o) + if err != nil { + diags.AddError(errmsg.SummaryErrorReadingResource, err.Error()) + return + } + + loadFromDTO(ctx, diags, o, dto) + if diags.HasError() { + return + } + diags.Append(state.Set(ctx, o)...) +} + +// getSIByID gets ServiceIntegration by ID +func getSIByID(ctx context.Context, client *aiven.Client, o *resourceModel) (dto *aiven.ServiceIntegration, err error) { + id := o.getID() + project := o.getProject() + if len(id)*len(project) == 0 { + return nil, fmt.Errorf("no ID or project provided") + } + + return dto, util.WaitActive(ctx, func() error { + dto, err = client.ServiceIntegrations.Get(project, id) + if err != nil { + return err + } + if !dto.Active { + return fmt.Errorf("service integration is not active") + } + return nil + }) +} + +// getSIByName gets ServiceIntegration by name, todo: use context +func getSIByName(_ context.Context, client *aiven.Client, o *resourceModel) (*aiven.ServiceIntegration, error) { + project := o.Project.ValueString() + integrationType := o.IntegrationType.ValueString() + sourceServiceName := o.SourceServiceName.ValueString() + destinationServiceName := o.DestinationServiceName.ValueString() + + integrations, err := client.ServiceIntegrations.List(project, sourceServiceName) + if err != nil && !aiven.IsNotFound(err) { + return nil, fmt.Errorf("unable to get list of service integrations: %s", err) + } + + for _, i := range integrations { + if i.SourceService == nil || i.DestinationService == nil || i.ServiceIntegrationID == "" { + continue + } + + if i.IntegrationType == integrationType && + *i.SourceService == sourceServiceName && + *i.DestinationService == destinationServiceName { + return i, nil + } + } + + return nil, nil +} + +// loadFromDTO loads API values to terraform object +func loadFromDTO(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) { + flattenUserConfig(ctx, diags, o, dto) + if diags.HasError() { + return + } + + id := o.getID() + project := o.getProject() + o.ID = newEndpointID(project, &id) + o.DestinationEndpointID = newEndpointID(project, dto.DestinationEndpointID) + o.DestinationServiceName = types.StringPointerValue(dto.DestinationService) + o.IntegrationType = types.StringValue(dto.IntegrationType) + o.SourceEndpointID = newEndpointID(project, dto.SourceEndpointID) + o.SourceServiceName = types.StringPointerValue(dto.SourceService) +} diff --git a/internal/plugin/service/serviceintegration/userconfig.go b/internal/plugin/service/serviceintegration/userconfig.go new file mode 100644 index 000000000..f8525a259 --- /dev/null +++ b/internal/plugin/service/serviceintegration/userconfig.go @@ -0,0 +1,134 @@ +package serviceintegration + +import ( + "context" + + "github.com/aiven/aiven-go-client" + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousekafka" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/clickhousepostgresql" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/datadog" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkaconnect" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkalogs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/kafkamirrormaker" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/logs" + "github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics" + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const ( + clickhouseKafkaType = "clickhouse_kafka" + clickhousePostgresqlType = "clickhouse_postgresql" + datadogType = "datadog" + externalAwsCloudwatchMetricsType = "external_aws_cloudwatch_metrics" + kafkaConnectType = "kafka_connect" + kafkaLogsType = "kafka_logs" + kafkaMirrormakerType = "kafka_mirrormaker" + logsType = "logs" + metricsType = "metrics" + readReplicaType = "read_replica" +) + +var integrationTypes = []string{ + "alertmanager", + "cassandra_cross_service_cluster", + clickhouseKafkaType, + clickhousePostgresqlType, + "dashboard", + datadogType, + "datasource", + "external_aws_cloudwatch_logs", + externalAwsCloudwatchMetricsType, + "external_elasticsearch_logs", + "external_google_cloud_logging", + "external_opensearch_logs", + "flink", + "internal_connectivity", + "jolokia", + kafkaConnectType, + kafkaLogsType, + kafkaMirrormakerType, + logsType, + "m3aggregator", + "m3coordinator", + metricsType, + "opensearch_cross_cluster_replication", + "opensearch_cross_cluster_search", + "prometheus", + readReplicaType, + "rsyslog", + "schema_registry_proxy", +} + +// flattenUserConfig from aiven to terraform +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, dto *aiven.ServiceIntegration) { + if dto.UserConfig == nil { + return + } + + // We set user config from Aiven only if it's been set in TF + // Otherwise it will produce invalid "after apply" + switch { + case isSet(o.ClickhouseKafkaUserConfig): + o.ClickhouseKafkaUserConfig = clickhousekafka.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.ClickhousePostgresqlUserConfig): + o.ClickhousePostgresqlUserConfig = clickhousepostgresql.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.DatadogUserConfig): + o.DatadogUserConfig = datadog.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.ExternalAwsCloudwatchMetricsUserConfig): + o.ExternalAwsCloudwatchMetricsUserConfig = externalawscloudwatchmetrics.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaConnectUserConfig): + o.KafkaConnectUserConfig = kafkaconnect.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaLogsUserConfig): + o.KafkaLogsUserConfig = kafkalogs.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.KafkaMirrormakerUserConfig): + o.KafkaMirrormakerUserConfig = kafkamirrormaker.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.LogsUserConfig): + o.LogsUserConfig = logs.Flatten(ctx, diags, dto.UserConfig) + case isSet(o.MetricsUserConfig): + 
o.MetricsUserConfig = metrics.Flatten(ctx, diags, dto.UserConfig)
+ }
+}
+
+// expandUserConfig converts the terraform object into the Aiven user config
+func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *resourceModel, create bool) (map[string]any, error) {
+ var marshal func(any) (map[string]any, error)
+ if create {
+ marshal = schemautil.MarshalCreateUserConfig
+ } else {
+ marshal = schemautil.MarshalUpdateUserConfig
+ }
+
+ // If an invalid integration type is set, this sends the wrong config to Aiven,
+ // which effectively acts as an extra validation step
+ switch {
+ case isSet(o.ClickhouseKafkaUserConfig):
+ return marshal(clickhousekafka.Expand(ctx, diags, o.ClickhouseKafkaUserConfig))
+ case isSet(o.ClickhousePostgresqlUserConfig):
+ return marshal(clickhousepostgresql.Expand(ctx, diags, o.ClickhousePostgresqlUserConfig))
+ case isSet(o.DatadogUserConfig):
+ return marshal(datadog.Expand(ctx, diags, o.DatadogUserConfig))
+ case isSet(o.ExternalAwsCloudwatchMetricsUserConfig):
+ return marshal(externalawscloudwatchmetrics.Expand(ctx, diags, o.ExternalAwsCloudwatchMetricsUserConfig))
+ case isSet(o.KafkaConnectUserConfig):
+ return marshal(kafkaconnect.Expand(ctx, diags, o.KafkaConnectUserConfig))
+ case isSet(o.KafkaLogsUserConfig):
+ return marshal(kafkalogs.Expand(ctx, diags, o.KafkaLogsUserConfig))
+ case isSet(o.KafkaMirrormakerUserConfig):
+ return marshal(kafkamirrormaker.Expand(ctx, diags, o.KafkaMirrormakerUserConfig))
+ case isSet(o.LogsUserConfig):
+ return marshal(logs.Expand(ctx, diags, o.LogsUserConfig))
+ case isSet(o.MetricsUserConfig):
+ return marshal(metrics.Expand(ctx, diags, o.MetricsUserConfig))
+ default:
+ return nil, nil
+ }
+}
+
+func isSet(o types.List) bool {
+ return !(o.IsUnknown() || o.IsNull())
+}
diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go
new file mode 100644
index 000000000..02b903200
--- /dev/null
+++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka.go
@@ -0,0 +1,408 @@
+// Code generated by user config generator. DO NOT EDIT.
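+
+// For orientation only (this sketch is not part of the generated code): the service integration
+// resource drives the Expand/Flatten pair of this package roughly as follows, with the
+// internal/schemautil helpers doing the final (de)serialization; see userconfig.go above for
+// the actual call sites.
+//
+//	list := clickhousekafka.Flatten(ctx, diags, dto.UserConfig)   // API response map -> types.List
+//	cfg := clickhousekafka.Expand(ctx, diags, list)               // types.List -> request/response DTO
+//	payload, err := schemautil.MarshalCreateUserConfig(cfg)       // DTO -> create request body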
+ +package clickhousekafka + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"tables": resource.ListNestedBlock{ + Description: "Tables to create", + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{ + "auto_offset_reset": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("earliest"), + Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.", + Optional: true, + }, + "data_format": resource.StringAttribute{ + Description: "Message data format. The default value is `JSONEachRow`.", + Required: true, + }, + "date_time_input_format": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("basic"), + Description: "Method to read DateTime from text input formats. The default value is `basic`.", + Optional: true, + }, + "group_name": resource.StringAttribute{ + Description: "Kafka consumers group. The default value is `clickhouse`.", + Required: true, + }, + "handle_error_mode": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("default"), + Description: "How to handle errors for Kafka engine. The default value is `default`.", + Optional: true, + }, + "max_block_size": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(0), + Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.", + Optional: true, + }, + "max_rows_per_message": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(1), + Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.", + Optional: true, + }, + "name": resource.StringAttribute{ + Description: "Name of the table.", + Required: true, + }, + "num_consumers": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(1), + Description: "The number of consumers per table per replica. The default value is `1`.", + Optional: true, + }, + "poll_max_batch_size": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(0), + Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.", + Optional: true, + }, + "skip_broken_messages": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(0), + Description: "Skip at least this number of broken messages from Kafka topic per block. 
The default value is `0`.", + Optional: true, + }, + }, + Blocks: map[string]resource.Block{ + "columns": resource.ListNestedBlock{ + Description: "Table columns", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "name": resource.StringAttribute{ + Description: "Column name.", + Required: true, + }, + "type": resource.StringAttribute{ + Description: "Column type.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + "topics": resource.ListNestedBlock{ + Description: "Kafka topics", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"name": resource.StringAttribute{ + Description: "Name of the topic.", + Required: true, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"tables": datasource.ListNestedBlock{ + Description: "Tables to create", + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{ + "auto_offset_reset": datasource.StringAttribute{ + Computed: true, + Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.", + }, + "data_format": datasource.StringAttribute{ + Computed: true, + Description: "Message data format. The default value is `JSONEachRow`.", + }, + "date_time_input_format": datasource.StringAttribute{ + Computed: true, + Description: "Method to read DateTime from text input formats. The default value is `basic`.", + }, + "group_name": datasource.StringAttribute{ + Computed: true, + Description: "Kafka consumers group. The default value is `clickhouse`.", + }, + "handle_error_mode": datasource.StringAttribute{ + Computed: true, + Description: "How to handle errors for Kafka engine. The default value is `default`.", + }, + "max_block_size": datasource.Int64Attribute{ + Computed: true, + Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.", + }, + "max_rows_per_message": datasource.Int64Attribute{ + Computed: true, + Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.", + }, + "name": datasource.StringAttribute{ + Computed: true, + Description: "Name of the table.", + }, + "num_consumers": datasource.Int64Attribute{ + Computed: true, + Description: "The number of consumers per table per replica. The default value is `1`.", + }, + "poll_max_batch_size": datasource.Int64Attribute{ + Computed: true, + Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.", + }, + "skip_broken_messages": datasource.Int64Attribute{ + Computed: true, + Description: "Skip at least this number of broken messages from Kafka topic per block. 
The default value is `0`.", + }, + }, + Blocks: map[string]datasource.Block{ + "columns": datasource.ListNestedBlock{ + Description: "Table columns", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "name": datasource.StringAttribute{ + Computed: true, + Description: "Column name.", + }, + "type": datasource.StringAttribute{ + Computed: true, + Description: "Column type.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + "topics": datasource.ListNestedBlock{ + Description: "Kafka topics", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"name": datasource.StringAttribute{ + Computed: true, + Description: "Name of the topic.", + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(100)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Tables types.List `tfsdk:"tables"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Tables []*dtoTables `groups:"create,update" json:"tables,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + tablesVar := schemautil.ExpandListNested[tfoTables, dtoTables](ctx, diags, expandTables, o.Tables) + if diags.HasError() { + return nil + } + return &dtoUserConfig{Tables: tablesVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + tablesVar := schemautil.FlattenListNested[dtoTables, tfoTables](ctx, diags, flattenTables, tablesAttrs, o.Tables) + if diags.HasError() { + return nil + } + return &tfoUserConfig{Tables: tablesVar} +} + +var userConfigAttrs = map[string]attr.Type{"tables": types.ListType{ElemType: types.ObjectType{AttrTypes: tablesAttrs}}} + +// tfoTables Table to create +type tfoTables struct { + AutoOffsetReset types.String `tfsdk:"auto_offset_reset"` + Columns types.List `tfsdk:"columns"` + DataFormat types.String `tfsdk:"data_format"` + DateTimeInputFormat types.String `tfsdk:"date_time_input_format"` + GroupName types.String `tfsdk:"group_name"` + HandleErrorMode types.String `tfsdk:"handle_error_mode"` + MaxBlockSize types.Int64 `tfsdk:"max_block_size"` + MaxRowsPerMessage types.Int64 `tfsdk:"max_rows_per_message"` + Name types.String `tfsdk:"name"` + NumConsumers types.Int64 `tfsdk:"num_consumers"` + PollMaxBatchSize types.Int64 `tfsdk:"poll_max_batch_size"` + SkipBrokenMessages types.Int64 `tfsdk:"skip_broken_messages"` + Topics types.List `tfsdk:"topics"` +} + +// dtoTables request/response object +type dtoTables struct { + AutoOffsetReset *string `groups:"create,update" json:"auto_offset_reset,omitempty"` + Columns []*dtoColumns `groups:"create,update" json:"columns"` + DataFormat string `groups:"create,update" json:"data_format"` + DateTimeInputFormat *string `groups:"create,update" json:"date_time_input_format,omitempty"` + GroupName string `groups:"create,update" json:"group_name"` + HandleErrorMode *string `groups:"create,update" json:"handle_error_mode,omitempty"` + MaxBlockSize *int64 `groups:"create,update" json:"max_block_size,omitempty"` + MaxRowsPerMessage *int64 `groups:"create,update" json:"max_rows_per_message,omitempty"` + Name string `groups:"create,update" 
json:"name"` + NumConsumers *int64 `groups:"create,update" json:"num_consumers,omitempty"` + PollMaxBatchSize *int64 `groups:"create,update" json:"poll_max_batch_size,omitempty"` + SkipBrokenMessages *int64 `groups:"create,update" json:"skip_broken_messages,omitempty"` + Topics []*dtoTopics `groups:"create,update" json:"topics"` +} + +// expandTables expands tf object into dto object +func expandTables(ctx context.Context, diags *diag.Diagnostics, o *tfoTables) *dtoTables { + columnsVar := schemautil.ExpandListNested[tfoColumns, dtoColumns](ctx, diags, expandColumns, o.Columns) + if diags.HasError() { + return nil + } + topicsVar := schemautil.ExpandListNested[tfoTopics, dtoTopics](ctx, diags, expandTopics, o.Topics) + if diags.HasError() { + return nil + } + return &dtoTables{ + AutoOffsetReset: schemautil.ValueStringPointer(o.AutoOffsetReset), + Columns: columnsVar, + DataFormat: o.DataFormat.ValueString(), + DateTimeInputFormat: schemautil.ValueStringPointer(o.DateTimeInputFormat), + GroupName: o.GroupName.ValueString(), + HandleErrorMode: schemautil.ValueStringPointer(o.HandleErrorMode), + MaxBlockSize: schemautil.ValueInt64Pointer(o.MaxBlockSize), + MaxRowsPerMessage: schemautil.ValueInt64Pointer(o.MaxRowsPerMessage), + Name: o.Name.ValueString(), + NumConsumers: schemautil.ValueInt64Pointer(o.NumConsumers), + PollMaxBatchSize: schemautil.ValueInt64Pointer(o.PollMaxBatchSize), + SkipBrokenMessages: schemautil.ValueInt64Pointer(o.SkipBrokenMessages), + Topics: topicsVar, + } +} + +// flattenTables flattens dto object into tf object +func flattenTables(ctx context.Context, diags *diag.Diagnostics, o *dtoTables) *tfoTables { + columnsVar := schemautil.FlattenListNested[dtoColumns, tfoColumns](ctx, diags, flattenColumns, columnsAttrs, o.Columns) + if diags.HasError() { + return nil + } + topicsVar := schemautil.FlattenListNested[dtoTopics, tfoTopics](ctx, diags, flattenTopics, topicsAttrs, o.Topics) + if diags.HasError() { + return nil + } + return &tfoTables{ + AutoOffsetReset: types.StringPointerValue(o.AutoOffsetReset), + Columns: columnsVar, + DataFormat: types.StringValue(o.DataFormat), + DateTimeInputFormat: types.StringPointerValue(o.DateTimeInputFormat), + GroupName: types.StringValue(o.GroupName), + HandleErrorMode: types.StringPointerValue(o.HandleErrorMode), + MaxBlockSize: types.Int64PointerValue(o.MaxBlockSize), + MaxRowsPerMessage: types.Int64PointerValue(o.MaxRowsPerMessage), + Name: types.StringValue(o.Name), + NumConsumers: types.Int64PointerValue(o.NumConsumers), + PollMaxBatchSize: types.Int64PointerValue(o.PollMaxBatchSize), + SkipBrokenMessages: types.Int64PointerValue(o.SkipBrokenMessages), + Topics: topicsVar, + } +} + +var tablesAttrs = map[string]attr.Type{ + "auto_offset_reset": types.StringType, + "columns": types.ListType{ElemType: types.ObjectType{AttrTypes: columnsAttrs}}, + "data_format": types.StringType, + "date_time_input_format": types.StringType, + "group_name": types.StringType, + "handle_error_mode": types.StringType, + "max_block_size": types.Int64Type, + "max_rows_per_message": types.Int64Type, + "name": types.StringType, + "num_consumers": types.Int64Type, + "poll_max_batch_size": types.Int64Type, + "skip_broken_messages": types.Int64Type, + "topics": types.ListType{ElemType: types.ObjectType{AttrTypes: topicsAttrs}}, +} + +// tfoColumns Table column +type tfoColumns struct { + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` +} + +// dtoColumns request/response object +type dtoColumns struct { + Name string 
`groups:"create,update" json:"name"` + Type string `groups:"create,update" json:"type"` +} + +// expandColumns expands tf object into dto object +func expandColumns(ctx context.Context, diags *diag.Diagnostics, o *tfoColumns) *dtoColumns { + return &dtoColumns{ + Name: o.Name.ValueString(), + Type: o.Type.ValueString(), + } +} + +// flattenColumns flattens dto object into tf object +func flattenColumns(ctx context.Context, diags *diag.Diagnostics, o *dtoColumns) *tfoColumns { + return &tfoColumns{ + Name: types.StringValue(o.Name), + Type: types.StringValue(o.Type), + } +} + +var columnsAttrs = map[string]attr.Type{ + "name": types.StringType, + "type": types.StringType, +} + +// tfoTopics Kafka topic +type tfoTopics struct { + Name types.String `tfsdk:"name"` +} + +// dtoTopics request/response object +type dtoTopics struct { + Name string `groups:"create,update" json:"name"` +} + +// expandTopics expands tf object into dto object +func expandTopics(ctx context.Context, diags *diag.Diagnostics, o *tfoTopics) *dtoTopics { + return &dtoTopics{Name: o.Name.ValueString()} +} + +// flattenTopics flattens dto object into tf object +func flattenTopics(ctx context.Context, diags *diag.Diagnostics, o *dtoTopics) *tfoTopics { + return &tfoTopics{Name: types.StringValue(o.Name)} +} + +var topicsAttrs = map[string]attr.Type{"name": types.StringType} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go new file mode 100644 index 000000000..77316cfac --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousekafka/clickhouse_kafka_test.go @@ -0,0 +1,122 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package clickhousekafka + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "tables": [ + { + "auto_offset_reset": "foo", + "columns": [ + { + "name": "foo", + "type": "foo" + } + ], + "data_format": "foo", + "date_time_input_format": "foo", + "group_name": "foo", + "handle_error_mode": "foo", + "max_block_size": 1, + "max_rows_per_message": 1, + "name": "foo", + "num_consumers": 1, + "poll_max_batch_size": 1, + "skip_broken_messages": 1, + "topics": [ + { + "name": "foo" + } + ] + } + ] +}` +const updateOnlyFields = `{ + "tables": [ + { + "auto_offset_reset": "foo", + "columns": [ + { + "name": "foo", + "type": "foo" + } + ], + "data_format": "foo", + "date_time_input_format": "foo", + "group_name": "foo", + "handle_error_mode": "foo", + "max_block_size": 1, + "max_rows_per_message": 1, + "name": "foo", + "num_consumers": 1, + "poll_max_batch_size": 1, + "skip_broken_messages": 1, + "topics": [ + { + "name": "foo" + } + ] + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go new file mode 100644 index 000000000..a0cbd20b8 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql.go @@ -0,0 +1,145 @@ +// Code generated by user config generator. DO NOT EDIT. 
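+
+// A minimal, hypothetical Terraform configuration exercising this user config block
+// (illustrative only; the project and service names are placeholders, and the values shown
+// are simply the documented defaults):
+//
+//	resource "aiven_service_integration" "clickhouse_pg" {
+//	  project                  = "my-project"
+//	  integration_type         = "clickhouse_postgresql"
+//	  source_service_name      = "my-postgresql"
+//	  destination_service_name = "my-clickhouse"
+//
+//	  clickhouse_postgresql_user_config {
+//	    databases {
+//	      database = "defaultdb"
+//	      schema   = "public"
+//	    }
+//	  }
+//	}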
+ +package clickhousepostgresql + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"databases": resource.ListNestedBlock{ + Description: "Databases to expose", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "database": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("defaultdb"), + Description: "PostgreSQL database to expose. The default value is `defaultdb`.", + Optional: true, + }, + "schema": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("public"), + Description: "PostgreSQL schema to expose. The default value is `public`.", + Optional: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(10)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"databases": datasource.ListNestedBlock{ + Description: "Databases to expose", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "database": datasource.StringAttribute{ + Computed: true, + Description: "PostgreSQL database to expose. The default value is `defaultdb`.", + }, + "schema": datasource.StringAttribute{ + Computed: true, + Description: "PostgreSQL schema to expose. 
The default value is `public`.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(10)}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Databases types.List `tfsdk:"databases"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Databases []*dtoDatabases `groups:"create,update" json:"databases,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + databasesVar := schemautil.ExpandListNested[tfoDatabases, dtoDatabases](ctx, diags, expandDatabases, o.Databases) + if diags.HasError() { + return nil + } + return &dtoUserConfig{Databases: databasesVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + databasesVar := schemautil.FlattenListNested[dtoDatabases, tfoDatabases](ctx, diags, flattenDatabases, databasesAttrs, o.Databases) + if diags.HasError() { + return nil + } + return &tfoUserConfig{Databases: databasesVar} +} + +var userConfigAttrs = map[string]attr.Type{"databases": types.ListType{ElemType: types.ObjectType{AttrTypes: databasesAttrs}}} + +// tfoDatabases Database to expose +type tfoDatabases struct { + Database types.String `tfsdk:"database"` + Schema types.String `tfsdk:"schema"` +} + +// dtoDatabases request/response object +type dtoDatabases struct { + Database *string `groups:"create,update" json:"database,omitempty"` + Schema *string `groups:"create,update" json:"schema,omitempty"` +} + +// expandDatabases expands tf object into dto object +func expandDatabases(ctx context.Context, diags *diag.Diagnostics, o *tfoDatabases) *dtoDatabases { + return &dtoDatabases{ + Database: schemautil.ValueStringPointer(o.Database), + Schema: schemautil.ValueStringPointer(o.Schema), + } +} + +// flattenDatabases flattens dto object into tf object +func flattenDatabases(ctx context.Context, diags *diag.Diagnostics, o *dtoDatabases) *tfoDatabases { + return &tfoDatabases{ + Database: types.StringPointerValue(o.Database), + Schema: types.StringPointerValue(o.Schema), + } +} + +var databasesAttrs = map[string]attr.Type{ + "database": types.StringType, + "schema": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go new file mode 100644 index 000000000..217dea78b --- /dev/null +++ b/internal/plugin/service/userconfig/integration/clickhousepostgresql/clickhouse_postgresql_test.go @@ 
-0,0 +1,82 @@ +// Code generated by user config generator. DO NOT EDIT. + +package clickhousepostgresql + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "databases": [ + { + "database": "foo", + "schema": "foo" + } + ] +}` +const updateOnlyFields = `{ + "databases": [ + { + "database": "foo", + "schema": "foo" + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog.go b/internal/plugin/service/userconfig/integration/datadog/datadog.go new file mode 100644 index 000000000..8a144b214 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/datadog/datadog.go @@ -0,0 +1,460 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package datadog + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + booldefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{ + "datadog_dbm_enabled": resource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Database Monitoring.", + Optional: true, + }, + "exclude_consumer_groups": resource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "exclude_topics": resource.ListAttribute{ + Computed: true, + Description: "List of topics to exclude.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "include_consumer_groups": resource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "include_topics": resource.ListAttribute{ + Computed: true, + Description: "List of topics to include.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "kafka_custom_metrics": resource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "max_jmx_metrics": resource.Int64Attribute{ + Computed: true, + Description: "Maximum number of JMX metrics to send.", + Optional: true, + }, + }, + Blocks: map[string]resource.Block{ + "datadog_tags": resource.ListNestedBlock{ + Description: "Custom tags provided by user", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "comment": resource.StringAttribute{ + Computed: true, + Description: "Optional tag explanation.", + Optional: true, + }, + "tag": resource.StringAttribute{ + Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(32)}, + }, + "opensearch": resource.ListNestedBlock{ + Description: "Datadog Opensearch Options", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "index_stats_enabled": resource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Index Monitoring.", + Optional: true, + }, + "pending_task_stats_enabled": resource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Pending Task Monitoring.", + Optional: true, + }, + "pshard_stats_enabled": resource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Primary Shard Monitoring.", + Optional: true, + }, + }}, + }, + "redis": resource.ListNestedBlock{ + Description: "Datadog Redis Options", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{"command_stats_enabled": resource.BoolAttribute{ + Computed: true, + Default: booldefault.StaticBool(false), + Description: "Enable command_stats option in the agent's configuration. The default value is `false`.", + Optional: true, + }}}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{ + "datadog_dbm_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Database Monitoring.", + }, + "exclude_consumer_groups": datasource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "exclude_topics": datasource.ListAttribute{ + Computed: true, + Description: "List of topics to exclude.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "include_consumer_groups": datasource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "include_topics": datasource.ListAttribute{ + Computed: true, + Description: "List of topics to include.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "kafka_custom_metrics": datasource.ListAttribute{ + Computed: true, + Description: "List of custom metrics.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "max_jmx_metrics": datasource.Int64Attribute{ + Computed: true, + Description: "Maximum number of JMX metrics to send.", + }, + }, + Blocks: map[string]datasource.Block{ + "datadog_tags": datasource.ListNestedBlock{ + Description: "Custom tags provided by user", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "comment": datasource.StringAttribute{ + Computed: true, + Description: "Optional tag explanation.", + }, + "tag": datasource.StringAttribute{ + Computed: true, + Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(32)}, + }, + "opensearch": datasource.ListNestedBlock{ + Description: "Datadog Opensearch Options", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "index_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Index Monitoring.", + }, + "pending_task_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Pending Task Monitoring.", + }, + "pshard_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable Datadog Opensearch Primary Shard Monitoring.", + }, + }}, + }, + "redis": datasource.ListNestedBlock{ + Description: "Datadog Redis Options", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{"command_stats_enabled": datasource.BoolAttribute{ + Computed: true, + Description: "Enable command_stats option in the agent's configuration. The default value is `false`.", + }}}, + }, + }, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + DatadogDbmEnabled types.Bool `tfsdk:"datadog_dbm_enabled"` + DatadogTags types.List `tfsdk:"datadog_tags"` + ExcludeConsumerGroups types.List `tfsdk:"exclude_consumer_groups"` + ExcludeTopics types.List `tfsdk:"exclude_topics"` + IncludeConsumerGroups types.List `tfsdk:"include_consumer_groups"` + IncludeTopics types.List `tfsdk:"include_topics"` + KafkaCustomMetrics types.List `tfsdk:"kafka_custom_metrics"` + MaxJmxMetrics types.Int64 `tfsdk:"max_jmx_metrics"` + Opensearch types.List `tfsdk:"opensearch"` + Redis types.List `tfsdk:"redis"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + DatadogDbmEnabled *bool `groups:"create,update" json:"datadog_dbm_enabled,omitempty"` + DatadogTags []*dtoDatadogTags `groups:"create,update" json:"datadog_tags,omitempty"` + ExcludeConsumerGroups []string `groups:"create,update" json:"exclude_consumer_groups,omitempty"` + ExcludeTopics []string `groups:"create,update" json:"exclude_topics,omitempty"` + IncludeConsumerGroups []string `groups:"create,update" json:"include_consumer_groups,omitempty"` + IncludeTopics []string `groups:"create,update" json:"include_topics,omitempty"` + KafkaCustomMetrics []string `groups:"create,update" json:"kafka_custom_metrics,omitempty"` + MaxJmxMetrics *int64 `groups:"create,update" json:"max_jmx_metrics,omitempty"` + Opensearch *dtoOpensearch `groups:"create,update" json:"opensearch,omitempty"` + Redis *dtoRedis `groups:"create,update" json:"redis,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + datadogTagsVar := schemautil.ExpandListNested[tfoDatadogTags, dtoDatadogTags](ctx, diags, expandDatadogTags, o.DatadogTags) + if diags.HasError() { + return nil + } + excludeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeConsumerGroups) + if diags.HasError() { + return nil + } + excludeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.ExcludeTopics) + if diags.HasError() { + return nil + } + includeConsumerGroupsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeConsumerGroups) + if diags.HasError() { + return nil + } + includeTopicsVar := schemautil.ExpandList[string](ctx, diags, o.IncludeTopics) + if diags.HasError() { + 
return nil + } + kafkaCustomMetricsVar := schemautil.ExpandList[string](ctx, diags, o.KafkaCustomMetrics) + if diags.HasError() { + return nil + } + opensearchVar := schemautil.ExpandListBlockNested[tfoOpensearch, dtoOpensearch](ctx, diags, expandOpensearch, o.Opensearch) + if diags.HasError() { + return nil + } + redisVar := schemautil.ExpandListBlockNested[tfoRedis, dtoRedis](ctx, diags, expandRedis, o.Redis) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + DatadogDbmEnabled: schemautil.ValueBoolPointer(o.DatadogDbmEnabled), + DatadogTags: datadogTagsVar, + ExcludeConsumerGroups: excludeConsumerGroupsVar, + ExcludeTopics: excludeTopicsVar, + IncludeConsumerGroups: includeConsumerGroupsVar, + IncludeTopics: includeTopicsVar, + KafkaCustomMetrics: kafkaCustomMetricsVar, + MaxJmxMetrics: schemautil.ValueInt64Pointer(o.MaxJmxMetrics), + Opensearch: opensearchVar, + Redis: redisVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + datadogTagsVar := schemautil.FlattenListNested[dtoDatadogTags, tfoDatadogTags](ctx, diags, flattenDatadogTags, datadogTagsAttrs, o.DatadogTags) + if diags.HasError() { + return nil + } + excludeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeConsumerGroups) + diags.Append(d...) + if diags.HasError() { + return nil + } + excludeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.ExcludeTopics) + diags.Append(d...) + if diags.HasError() { + return nil + } + includeConsumerGroupsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeConsumerGroups) + diags.Append(d...) + if diags.HasError() { + return nil + } + includeTopicsVar, d := types.ListValueFrom(ctx, types.StringType, o.IncludeTopics) + diags.Append(d...) + if diags.HasError() { + return nil + } + kafkaCustomMetricsVar, d := types.ListValueFrom(ctx, types.StringType, o.KafkaCustomMetrics) + diags.Append(d...) 
+ if diags.HasError() { + return nil + } + opensearchVar := schemautil.FlattenListBlockNested[dtoOpensearch, tfoOpensearch](ctx, diags, flattenOpensearch, opensearchAttrs, o.Opensearch) + if diags.HasError() { + return nil + } + redisVar := schemautil.FlattenListBlockNested[dtoRedis, tfoRedis](ctx, diags, flattenRedis, redisAttrs, o.Redis) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + DatadogDbmEnabled: types.BoolPointerValue(o.DatadogDbmEnabled), + DatadogTags: datadogTagsVar, + ExcludeConsumerGroups: excludeConsumerGroupsVar, + ExcludeTopics: excludeTopicsVar, + IncludeConsumerGroups: includeConsumerGroupsVar, + IncludeTopics: includeTopicsVar, + KafkaCustomMetrics: kafkaCustomMetricsVar, + MaxJmxMetrics: types.Int64PointerValue(o.MaxJmxMetrics), + Opensearch: opensearchVar, + Redis: redisVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "datadog_dbm_enabled": types.BoolType, + "datadog_tags": types.ListType{ElemType: types.ObjectType{AttrTypes: datadogTagsAttrs}}, + "exclude_consumer_groups": types.ListType{ElemType: types.StringType}, + "exclude_topics": types.ListType{ElemType: types.StringType}, + "include_consumer_groups": types.ListType{ElemType: types.StringType}, + "include_topics": types.ListType{ElemType: types.StringType}, + "kafka_custom_metrics": types.ListType{ElemType: types.StringType}, + "max_jmx_metrics": types.Int64Type, + "opensearch": types.ListType{ElemType: types.ObjectType{AttrTypes: opensearchAttrs}}, + "redis": types.ListType{ElemType: types.ObjectType{AttrTypes: redisAttrs}}, +} + +// tfoDatadogTags Datadog tag defined by user +type tfoDatadogTags struct { + Comment types.String `tfsdk:"comment"` + Tag types.String `tfsdk:"tag"` +} + +// dtoDatadogTags request/response object +type dtoDatadogTags struct { + Comment *string `groups:"create,update" json:"comment,omitempty"` + Tag string `groups:"create,update" json:"tag"` +} + +// expandDatadogTags expands tf object into dto object +func expandDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *tfoDatadogTags) *dtoDatadogTags { + return &dtoDatadogTags{ + Comment: schemautil.ValueStringPointer(o.Comment), + Tag: o.Tag.ValueString(), + } +} + +// flattenDatadogTags flattens dto object into tf object +func flattenDatadogTags(ctx context.Context, diags *diag.Diagnostics, o *dtoDatadogTags) *tfoDatadogTags { + return &tfoDatadogTags{ + Comment: types.StringPointerValue(o.Comment), + Tag: types.StringValue(o.Tag), + } +} + +var datadogTagsAttrs = map[string]attr.Type{ + "comment": types.StringType, + "tag": types.StringType, +} + +// tfoOpensearch Datadog Opensearch Options +type tfoOpensearch struct { + IndexStatsEnabled types.Bool `tfsdk:"index_stats_enabled"` + PendingTaskStatsEnabled types.Bool `tfsdk:"pending_task_stats_enabled"` + PshardStatsEnabled types.Bool `tfsdk:"pshard_stats_enabled"` +} + +// dtoOpensearch request/response object +type dtoOpensearch struct { + IndexStatsEnabled *bool `groups:"create,update" json:"index_stats_enabled,omitempty"` + PendingTaskStatsEnabled *bool `groups:"create,update" json:"pending_task_stats_enabled,omitempty"` + PshardStatsEnabled *bool `groups:"create,update" json:"pshard_stats_enabled,omitempty"` +} + +// expandOpensearch expands tf object into dto object +func expandOpensearch(ctx context.Context, diags *diag.Diagnostics, o *tfoOpensearch) *dtoOpensearch { + return &dtoOpensearch{ + IndexStatsEnabled: schemautil.ValueBoolPointer(o.IndexStatsEnabled), + PendingTaskStatsEnabled: 
schemautil.ValueBoolPointer(o.PendingTaskStatsEnabled), + PshardStatsEnabled: schemautil.ValueBoolPointer(o.PshardStatsEnabled), + } +} + +// flattenOpensearch flattens dto object into tf object +func flattenOpensearch(ctx context.Context, diags *diag.Diagnostics, o *dtoOpensearch) *tfoOpensearch { + return &tfoOpensearch{ + IndexStatsEnabled: types.BoolPointerValue(o.IndexStatsEnabled), + PendingTaskStatsEnabled: types.BoolPointerValue(o.PendingTaskStatsEnabled), + PshardStatsEnabled: types.BoolPointerValue(o.PshardStatsEnabled), + } +} + +var opensearchAttrs = map[string]attr.Type{ + "index_stats_enabled": types.BoolType, + "pending_task_stats_enabled": types.BoolType, + "pshard_stats_enabled": types.BoolType, +} + +// tfoRedis Datadog Redis Options +type tfoRedis struct { + CommandStatsEnabled types.Bool `tfsdk:"command_stats_enabled"` +} + +// dtoRedis request/response object +type dtoRedis struct { + CommandStatsEnabled *bool `groups:"create,update" json:"command_stats_enabled,omitempty"` +} + +// expandRedis expands tf object into dto object +func expandRedis(ctx context.Context, diags *diag.Diagnostics, o *tfoRedis) *dtoRedis { + return &dtoRedis{CommandStatsEnabled: schemautil.ValueBoolPointer(o.CommandStatsEnabled)} +} + +// flattenRedis flattens dto object into tf object +func flattenRedis(ctx context.Context, diags *diag.Diagnostics, o *dtoRedis) *tfoRedis { + return &tfoRedis{CommandStatsEnabled: types.BoolPointerValue(o.CommandStatsEnabled)} +} + +var redisAttrs = map[string]attr.Type{"command_stats_enabled": types.BoolType} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/datadog/datadog_test.go b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go new file mode 100644 index 000000000..7dc27e200 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/datadog/datadog_test.go @@ -0,0 +1,132 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package datadog + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "datadog_dbm_enabled": true, + "datadog_tags": [ + { + "comment": "foo", + "tag": "foo" + } + ], + "exclude_consumer_groups": [ + "foo" + ], + "exclude_topics": [ + "foo" + ], + "include_consumer_groups": [ + "foo" + ], + "include_topics": [ + "foo" + ], + "kafka_custom_metrics": [ + "foo" + ], + "max_jmx_metrics": 1, + "opensearch": { + "index_stats_enabled": true, + "pending_task_stats_enabled": true, + "pshard_stats_enabled": true + }, + "redis": { + "command_stats_enabled": true + } +}` +const updateOnlyFields = `{ + "datadog_dbm_enabled": true, + "datadog_tags": [ + { + "comment": "foo", + "tag": "foo" + } + ], + "exclude_consumer_groups": [ + "foo" + ], + "exclude_topics": [ + "foo" + ], + "include_consumer_groups": [ + "foo" + ], + "include_topics": [ + "foo" + ], + "kafka_custom_metrics": [ + "foo" + ], + "max_jmx_metrics": 1, + "opensearch": { + "index_stats_enabled": true, + "pending_task_stats_enabled": true, + "pshard_stats_enabled": true + }, + "redis": { + "command_stats_enabled": true + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go new file mode 100644 index 000000000..bc0a75e75 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics.go @@ -0,0 +1,224 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package externalawscloudwatchmetrics + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "External AWS CloudWatch Metrics integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{ + "dropped_metrics": resource.ListNestedBlock{ + Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "field": resource.StringAttribute{ + Description: "Identifier of a value in the metric.", + Required: true, + }, + "metric": resource.StringAttribute{ + Description: "Identifier of the metric.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "extra_metrics": resource.ListNestedBlock{ + Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "field": resource.StringAttribute{ + Description: "Identifier of a value in the metric.", + Required: true, + }, + "metric": resource.StringAttribute{ + Description: "Identifier of the metric.", + Required: true, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "External AWS CloudWatch Metrics integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{ + "dropped_metrics": datasource.ListNestedBlock{ + Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "field": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of a value in the metric.", + }, + "metric": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of the metric.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + "extra_metrics": datasource.ListNestedBlock{ + Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "field": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of a value in the metric.", + }, + "metric": datasource.StringAttribute{ + Computed: true, + Description: "Identifier of the metric.", + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1024)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig External AWS CloudWatch 
Metrics integration user config +type tfoUserConfig struct { + DroppedMetrics types.List `tfsdk:"dropped_metrics"` + ExtraMetrics types.List `tfsdk:"extra_metrics"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + DroppedMetrics []*dtoDroppedMetrics `groups:"create,update" json:"dropped_metrics,omitempty"` + ExtraMetrics []*dtoExtraMetrics `groups:"create,update" json:"extra_metrics,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + droppedMetricsVar := schemautil.ExpandListNested[tfoDroppedMetrics, dtoDroppedMetrics](ctx, diags, expandDroppedMetrics, o.DroppedMetrics) + if diags.HasError() { + return nil + } + extraMetricsVar := schemautil.ExpandListNested[tfoExtraMetrics, dtoExtraMetrics](ctx, diags, expandExtraMetrics, o.ExtraMetrics) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + DroppedMetrics: droppedMetricsVar, + ExtraMetrics: extraMetricsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + droppedMetricsVar := schemautil.FlattenListNested[dtoDroppedMetrics, tfoDroppedMetrics](ctx, diags, flattenDroppedMetrics, droppedMetricsAttrs, o.DroppedMetrics) + if diags.HasError() { + return nil + } + extraMetricsVar := schemautil.FlattenListNested[dtoExtraMetrics, tfoExtraMetrics](ctx, diags, flattenExtraMetrics, extraMetricsAttrs, o.ExtraMetrics) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + DroppedMetrics: droppedMetricsVar, + ExtraMetrics: extraMetricsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "dropped_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: droppedMetricsAttrs}}, + "extra_metrics": types.ListType{ElemType: types.ObjectType{AttrTypes: extraMetricsAttrs}}, +} + +// tfoDroppedMetrics Metric name and subfield +type tfoDroppedMetrics struct { + Field types.String `tfsdk:"field"` + Metric types.String `tfsdk:"metric"` +} + +// dtoDroppedMetrics request/response object +type dtoDroppedMetrics struct { + Field string `groups:"create,update" json:"field"` + Metric string `groups:"create,update" json:"metric"` +} + +// expandDroppedMetrics expands tf object into dto object +func expandDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoDroppedMetrics) *dtoDroppedMetrics { + return &dtoDroppedMetrics{ + Field: o.Field.ValueString(), + Metric: o.Metric.ValueString(), + } +} + +// flattenDroppedMetrics flattens dto object into tf object +func flattenDroppedMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoDroppedMetrics) *tfoDroppedMetrics { + return &tfoDroppedMetrics{ + Field: types.StringValue(o.Field), + Metric: types.StringValue(o.Metric), + } +} + +var droppedMetricsAttrs = map[string]attr.Type{ + "field": types.StringType, + "metric": types.StringType, +} + +// tfoExtraMetrics Metric name and subfield +type tfoExtraMetrics struct { + Field types.String `tfsdk:"field"` + Metric types.String `tfsdk:"metric"` +} + +// dtoExtraMetrics request/response object +type dtoExtraMetrics struct { + Field string `groups:"create,update" json:"field"` + Metric string `groups:"create,update" json:"metric"` +} + +// expandExtraMetrics expands tf object into dto object +func expandExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *tfoExtraMetrics) *dtoExtraMetrics { + return &dtoExtraMetrics{ + Field: 
o.Field.ValueString(), + Metric: o.Metric.ValueString(), + } +} + +// flattenExtraMetrics flattens dto object into tf object +func flattenExtraMetrics(ctx context.Context, diags *diag.Diagnostics, o *dtoExtraMetrics) *tfoExtraMetrics { + return &tfoExtraMetrics{ + Field: types.StringValue(o.Field), + Metric: types.StringValue(o.Metric), + } +} + +var extraMetricsAttrs = map[string]attr.Type{ + "field": types.StringType, + "metric": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go new file mode 100644 index 000000000..9cf794599 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/externalawscloudwatchmetrics/external_aws_cloudwatch_metrics_test.go @@ -0,0 +1,94 @@ +// Code generated by user config generator. DO NOT EDIT. + +package externalawscloudwatchmetrics + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "dropped_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ], + "extra_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ] +}` +const updateOnlyFields = `{ + "dropped_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ], + "extra_metrics": [ + { + "field": "foo", + "metric": "foo" + } + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, 
err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go new file mode 100644 index 000000000..7593af6cf --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect.go @@ -0,0 +1,168 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"kafka_connect": resource.ListNestedBlock{ + Description: "Kafka Connect service configuration values", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "config_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.", + Optional: true, + }, + "group_id": resource.StringAttribute{ + Computed: true, + Description: "A unique string that identifies the Connect cluster group this worker belongs to.", + Optional: true, + }, + "offset_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.", + Optional: true, + }, + "status_storage_topic": resource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.", + Optional: true, + }, + }}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"kafka_connect": datasource.ListNestedBlock{ + Description: "Kafka Connect service configuration values", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "config_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.", + }, + "group_id": datasource.StringAttribute{ + Computed: true, + Description: "A unique string that identifies the Connect cluster group this worker belongs to.", + }, + "offset_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic 
where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.", + }, + "status_storage_topic": datasource.StringAttribute{ + Computed: true, + Description: "The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.", + }, + }}, + }}}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + KafkaConnect types.List `tfsdk:"kafka_connect"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + KafkaConnect *dtoKafkaConnect `groups:"create,update" json:"kafka_connect,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + kafkaConnectVar := schemautil.ExpandListBlockNested[tfoKafkaConnect, dtoKafkaConnect](ctx, diags, expandKafkaConnect, o.KafkaConnect) + if diags.HasError() { + return nil + } + return &dtoUserConfig{KafkaConnect: kafkaConnectVar} +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + kafkaConnectVar := schemautil.FlattenListBlockNested[dtoKafkaConnect, tfoKafkaConnect](ctx, diags, flattenKafkaConnect, kafkaConnectAttrs, o.KafkaConnect) + if diags.HasError() { + return nil + } + return &tfoUserConfig{KafkaConnect: kafkaConnectVar} +} + +var userConfigAttrs = map[string]attr.Type{"kafka_connect": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaConnectAttrs}}} + +// tfoKafkaConnect Kafka Connect service configuration values +type tfoKafkaConnect struct { + ConfigStorageTopic types.String `tfsdk:"config_storage_topic"` + GroupId types.String `tfsdk:"group_id"` + OffsetStorageTopic types.String `tfsdk:"offset_storage_topic"` + StatusStorageTopic types.String `tfsdk:"status_storage_topic"` +} + +// dtoKafkaConnect request/response object +type dtoKafkaConnect struct { + ConfigStorageTopic *string `groups:"create,update" json:"config_storage_topic,omitempty"` + GroupId *string `groups:"create,update" json:"group_id,omitempty"` + OffsetStorageTopic *string `groups:"create,update" json:"offset_storage_topic,omitempty"` + StatusStorageTopic *string `groups:"create,update" json:"status_storage_topic,omitempty"` +} + +// expandKafkaConnect expands tf object into dto object +func expandKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaConnect) *dtoKafkaConnect { + return &dtoKafkaConnect{ + ConfigStorageTopic: schemautil.ValueStringPointer(o.ConfigStorageTopic), + GroupId: schemautil.ValueStringPointer(o.GroupId), + OffsetStorageTopic: schemautil.ValueStringPointer(o.OffsetStorageTopic), + StatusStorageTopic: schemautil.ValueStringPointer(o.StatusStorageTopic), + } +} + +// flattenKafkaConnect flattens dto object into tf object +func flattenKafkaConnect(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaConnect) *tfoKafkaConnect { + return &tfoKafkaConnect{ + ConfigStorageTopic: types.StringPointerValue(o.ConfigStorageTopic), + GroupId: types.StringPointerValue(o.GroupId), + OffsetStorageTopic: types.StringPointerValue(o.OffsetStorageTopic), + StatusStorageTopic: types.StringPointerValue(o.StatusStorageTopic), + } +} + +var kafkaConnectAttrs = map[string]attr.Type{ + "config_storage_topic": types.StringType, + "group_id": types.StringType, + "offset_storage_topic": 
types.StringType, + "status_storage_topic": types.StringType, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go new file mode 100644 index 000000000..964993b87 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkaconnect/kafka_connect_test.go @@ -0,0 +1,82 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "kafka_connect": { + "config_storage_topic": "foo", + "group_id": "foo", + "offset_storage_topic": "foo", + "status_storage_topic": "foo" + } +}` +const updateOnlyFields = `{ + "kafka_connect": { + "config_storage_topic": "foo", + "group_id": "foo", + "offset_storage_topic": "foo", + "status_storage_topic": "foo" + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go new file mode 100644 index 000000000..b1c8cb565 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs.go @@ -0,0 +1,114 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package kafkalogs + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "kafka_topic": resource.StringAttribute{ + Description: "Topic name.", + Required: true, + }, + "selected_log_fields": resource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "kafka_topic": datasource.StringAttribute{ + Computed: true, + Description: "Topic name.", + }, + "selected_log_fields": datasource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + KafkaTopic types.String `tfsdk:"kafka_topic"` + SelectedLogFields types.List `tfsdk:"selected_log_fields"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + KafkaTopic string `groups:"create,update" json:"kafka_topic"` + SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + KafkaTopic: o.KafkaTopic.ValueString(), + SelectedLogFields: selectedLogFieldsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + diags.Append(d...) 
+ if diags.HasError() { + return nil + } + return &tfoUserConfig{ + KafkaTopic: types.StringValue(o.KafkaTopic), + SelectedLogFields: selectedLogFieldsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "kafka_topic": types.StringType, + "selected_log_fields": types.ListType{ElemType: types.StringType}, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go new file mode 100644 index 000000000..03a1c9ecc --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkalogs/kafka_logs_test.go @@ -0,0 +1,78 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkalogs + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "kafka_topic": "foo", + "selected_log_fields": [ + "foo" + ] +}` +const updateOnlyFields = `{ + "kafka_topic": "foo", + "selected_log_fields": [ + "foo" + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go new file mode 100644 index 000000000..548514588 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker.go @@ -0,0 +1,220 @@ +// Code 
generated by user config generator. DO NOT EDIT. + +package kafkamirrormaker + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{"cluster_alias": resource.StringAttribute{ + Computed: true, + Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", + Optional: true, + }}, + Blocks: map[string]resource.Block{"kafka_mirrormaker": resource.ListNestedBlock{ + Description: "Kafka MirrorMaker configuration values", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "consumer_fetch_min_bytes": resource.Int64Attribute{ + Computed: true, + Description: "The minimum amount of data the server should return for a fetch request.", + Optional: true, + }, + "producer_batch_size": resource.Int64Attribute{ + Computed: true, + Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", + Optional: true, + }, + "producer_buffer_memory": resource.Int64Attribute{ + Computed: true, + Description: "The amount of bytes producer can use for buffering data before publishing to broker.", + Optional: true, + }, + "producer_compression_type": resource.StringAttribute{ + Computed: true, + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Optional: true, + }, + "producer_linger_ms": resource.Int64Attribute{ + Computed: true, + Description: "The linger time (ms) for waiting new data to arrive for publishing.", + Optional: true, + }, + "producer_max_request_size": resource.Int64Attribute{ + Computed: true, + Description: "The maximum request size in bytes.", + Optional: true, + }, + }}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{"cluster_alias": datasource.StringAttribute{ + Computed: true, + Description: "The alias under which the Kafka cluster is known to MirrorMaker. 
Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", + }}, + Blocks: map[string]datasource.Block{"kafka_mirrormaker": datasource.ListNestedBlock{ + Description: "Kafka MirrorMaker configuration values", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "consumer_fetch_min_bytes": datasource.Int64Attribute{ + Computed: true, + Description: "The minimum amount of data the server should return for a fetch request.", + }, + "producer_batch_size": datasource.Int64Attribute{ + Computed: true, + Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", + }, + "producer_buffer_memory": datasource.Int64Attribute{ + Computed: true, + Description: "The amount of bytes producer can use for buffering data before publishing to broker.", + }, + "producer_compression_type": datasource.StringAttribute{ + Computed: true, + Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + }, + "producer_linger_ms": datasource.Int64Attribute{ + Computed: true, + Description: "The linger time (ms) for waiting new data to arrive for publishing.", + }, + "producer_max_request_size": datasource.Int64Attribute{ + Computed: true, + Description: "The maximum request size in bytes.", + }, + }}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + ClusterAlias types.String `tfsdk:"cluster_alias"` + KafkaMirrormaker types.List `tfsdk:"kafka_mirrormaker"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + ClusterAlias *string `groups:"create,update" json:"cluster_alias,omitempty"` + KafkaMirrormaker *dtoKafkaMirrormaker `groups:"create,update" json:"kafka_mirrormaker,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + kafkaMirrormakerVar := schemautil.ExpandListBlockNested[tfoKafkaMirrormaker, dtoKafkaMirrormaker](ctx, diags, expandKafkaMirrormaker, o.KafkaMirrormaker) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + ClusterAlias: schemautil.ValueStringPointer(o.ClusterAlias), + KafkaMirrormaker: kafkaMirrormakerVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + kafkaMirrormakerVar := schemautil.FlattenListBlockNested[dtoKafkaMirrormaker, tfoKafkaMirrormaker](ctx, diags, flattenKafkaMirrormaker, kafkaMirrormakerAttrs, o.KafkaMirrormaker) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + ClusterAlias: types.StringPointerValue(o.ClusterAlias), + KafkaMirrormaker: kafkaMirrormakerVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "cluster_alias": types.StringType, + "kafka_mirrormaker": types.ListType{ElemType: types.ObjectType{AttrTypes: kafkaMirrormakerAttrs}}, +} + +// tfoKafkaMirrormaker Kafka MirrorMaker configuration values +type tfoKafkaMirrormaker struct { + ConsumerFetchMinBytes types.Int64 `tfsdk:"consumer_fetch_min_bytes"` + ProducerBatchSize types.Int64 `tfsdk:"producer_batch_size"` + ProducerBufferMemory types.Int64 `tfsdk:"producer_buffer_memory"` + ProducerCompressionType types.String 
`tfsdk:"producer_compression_type"` + ProducerLingerMs types.Int64 `tfsdk:"producer_linger_ms"` + ProducerMaxRequestSize types.Int64 `tfsdk:"producer_max_request_size"` +} + +// dtoKafkaMirrormaker request/response object +type dtoKafkaMirrormaker struct { + ConsumerFetchMinBytes *int64 `groups:"create,update" json:"consumer_fetch_min_bytes,omitempty"` + ProducerBatchSize *int64 `groups:"create,update" json:"producer_batch_size,omitempty"` + ProducerBufferMemory *int64 `groups:"create,update" json:"producer_buffer_memory,omitempty"` + ProducerCompressionType *string `groups:"create,update" json:"producer_compression_type,omitempty"` + ProducerLingerMs *int64 `groups:"create,update" json:"producer_linger_ms,omitempty"` + ProducerMaxRequestSize *int64 `groups:"create,update" json:"producer_max_request_size,omitempty"` +} + +// expandKafkaMirrormaker expands tf object into dto object +func expandKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *tfoKafkaMirrormaker) *dtoKafkaMirrormaker { + return &dtoKafkaMirrormaker{ + ConsumerFetchMinBytes: schemautil.ValueInt64Pointer(o.ConsumerFetchMinBytes), + ProducerBatchSize: schemautil.ValueInt64Pointer(o.ProducerBatchSize), + ProducerBufferMemory: schemautil.ValueInt64Pointer(o.ProducerBufferMemory), + ProducerCompressionType: schemautil.ValueStringPointer(o.ProducerCompressionType), + ProducerLingerMs: schemautil.ValueInt64Pointer(o.ProducerLingerMs), + ProducerMaxRequestSize: schemautil.ValueInt64Pointer(o.ProducerMaxRequestSize), + } +} + +// flattenKafkaMirrormaker flattens dto object into tf object +func flattenKafkaMirrormaker(ctx context.Context, diags *diag.Diagnostics, o *dtoKafkaMirrormaker) *tfoKafkaMirrormaker { + return &tfoKafkaMirrormaker{ + ConsumerFetchMinBytes: types.Int64PointerValue(o.ConsumerFetchMinBytes), + ProducerBatchSize: types.Int64PointerValue(o.ProducerBatchSize), + ProducerBufferMemory: types.Int64PointerValue(o.ProducerBufferMemory), + ProducerCompressionType: types.StringPointerValue(o.ProducerCompressionType), + ProducerLingerMs: types.Int64PointerValue(o.ProducerLingerMs), + ProducerMaxRequestSize: types.Int64PointerValue(o.ProducerMaxRequestSize), + } +} + +var kafkaMirrormakerAttrs = map[string]attr.Type{ + "consumer_fetch_min_bytes": types.Int64Type, + "producer_batch_size": types.Int64Type, + "producer_buffer_memory": types.Int64Type, + "producer_compression_type": types.StringType, + "producer_linger_ms": types.Int64Type, + "producer_max_request_size": types.Int64Type, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go new file mode 100644 index 000000000..1e269a0d6 --- /dev/null +++ 
b/internal/plugin/service/userconfig/integration/kafkamirrormaker/kafka_mirrormaker_test.go @@ -0,0 +1,88 @@ +// Code generated by user config generator. DO NOT EDIT. + +package kafkamirrormaker + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "cluster_alias": "foo", + "kafka_mirrormaker": { + "consumer_fetch_min_bytes": 1, + "producer_batch_size": 1, + "producer_buffer_memory": 1, + "producer_compression_type": "foo", + "producer_linger_ms": 1, + "producer_max_request_size": 1 + } +}` +const updateOnlyFields = `{ + "cluster_alias": "foo", + "kafka_mirrormaker": { + "consumer_fetch_min_bytes": 1, + "producer_batch_size": 1, + "producer_buffer_memory": 1, + "producer_compression_type": "foo", + "producer_linger_ms": 1, + "producer_max_request_size": 1 + } +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/logs/logs.go b/internal/plugin/service/userconfig/integration/logs/logs.go new file mode 100644 index 000000000..7a9be7d51 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/logs/logs.go @@ -0,0 +1,133 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package logs + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + int64default "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + stringdefault "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "elasticsearch_index_days_max": resource.Int64Attribute{ + Computed: true, + Default: int64default.StaticInt64(3), + Description: "Elasticsearch index retention limit. The default value is `3`.", + Optional: true, + }, + "elasticsearch_index_prefix": resource.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("logs"), + Description: "Elasticsearch index prefix. The default value is `logs`.", + Optional: true, + }, + "selected_log_fields": resource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "elasticsearch_index_days_max": datasource.Int64Attribute{ + Computed: true, + Description: "Elasticsearch index retention limit. The default value is `3`.", + }, + "elasticsearch_index_prefix": datasource.StringAttribute{ + Computed: true, + Description: "Elasticsearch index prefix. The default value is `logs`.", + }, + "selected_log_fields": datasource.ListAttribute{ + Computed: true, + Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", + ElementType: types.StringType, + Validators: []validator.List{listvalidator.SizeAtMost(5)}, + }, + }}, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig +type tfoUserConfig struct { + ElasticsearchIndexDaysMax types.Int64 `tfsdk:"elasticsearch_index_days_max"` + ElasticsearchIndexPrefix types.String `tfsdk:"elasticsearch_index_prefix"` + SelectedLogFields types.List `tfsdk:"selected_log_fields"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + ElasticsearchIndexDaysMax *int64 `groups:"create,update" json:"elasticsearch_index_days_max,omitempty"` + ElasticsearchIndexPrefix *string `groups:"create,update" json:"elasticsearch_index_prefix,omitempty"` + SelectedLogFields []string `groups:"create,update" json:"selected_log_fields,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + selectedLogFieldsVar := schemautil.ExpandList[string](ctx, diags, o.SelectedLogFields) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + ElasticsearchIndexDaysMax: schemautil.ValueInt64Pointer(o.ElasticsearchIndexDaysMax), + ElasticsearchIndexPrefix: schemautil.ValueStringPointer(o.ElasticsearchIndexPrefix), + SelectedLogFields: selectedLogFieldsVar, + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + selectedLogFieldsVar, d := types.ListValueFrom(ctx, types.StringType, o.SelectedLogFields) + diags.Append(d...) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + ElasticsearchIndexDaysMax: types.Int64PointerValue(o.ElasticsearchIndexDaysMax), + ElasticsearchIndexPrefix: types.StringPointerValue(o.ElasticsearchIndexPrefix), + SelectedLogFields: selectedLogFieldsVar, + } +} + +var userConfigAttrs = map[string]attr.Type{ + "elasticsearch_index_days_max": types.Int64Type, + "elasticsearch_index_prefix": types.StringType, + "selected_log_fields": types.ListType{ElemType: types.StringType}, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/logs/logs_test.go b/internal/plugin/service/userconfig/integration/logs/logs_test.go new file mode 100644 index 000000000..9635dcc31 --- /dev/null +++ b/internal/plugin/service/userconfig/integration/logs/logs_test.go @@ -0,0 +1,80 @@ +// Code generated by user config generator. DO NOT EDIT. 
+ +package logs + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "elasticsearch_index_days_max": 1, + "elasticsearch_index_prefix": "foo", + "selected_log_fields": [ + "foo" + ] +}` +const updateOnlyFields = `{ + "elasticsearch_index_days_max": 1, + "elasticsearch_index_prefix": "foo", + "selected_log_fields": [ + "foo" + ] +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics.go b/internal/plugin/service/userconfig/integration/metrics/metrics.go new file mode 100644 index 000000000..7670348fc --- /dev/null +++ b/internal/plugin/service/userconfig/integration/metrics/metrics.go @@ -0,0 +1,414 @@ +// Code generated by user config generator. DO NOT EDIT. + +package metrics + +import ( + "context" + + listvalidator "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + attr "github.com/hashicorp/terraform-plugin-framework/attr" + datasource "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag "github.com/hashicorp/terraform-plugin-framework/diag" + resource "github.com/hashicorp/terraform-plugin-framework/resource/schema" + validator "github.com/hashicorp/terraform-plugin-framework/schema/validator" + types "github.com/hashicorp/terraform-plugin-framework/types" + + schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +// NewResourceSchema returns resource schema +func NewResourceSchema() resource.ListNestedBlock { + return resource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: resource.NestedBlockObject{ + Attributes: map[string]resource.Attribute{ + "database": resource.StringAttribute{ + Computed: true, + Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + "retention_days": resource.Int64Attribute{ + Computed: true, + Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. 
Set to 0 for no automatic cleanup. Defaults to 30 days.", + Optional: true, + }, + "ro_username": resource.StringAttribute{ + Computed: true, + Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + "username": resource.StringAttribute{ + Computed: true, + Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + Optional: true, + }, + }, + Blocks: map[string]resource.Block{"source_mysql": resource.ListNestedBlock{ + Description: "Configuration options for metrics where source service is MySQL", + NestedObject: resource.NestedBlockObject{Blocks: map[string]resource.Block{"telegraf": resource.ListNestedBlock{ + Description: "Configuration options for Telegraf MySQL input plugin", + NestedObject: resource.NestedBlockObject{Attributes: map[string]resource.Attribute{ + "gather_event_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", + Optional: true, + }, + "gather_file_events_stats": resource.BoolAttribute{ + Computed: true, + Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", + Optional: true, + }, + "gather_index_io_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", + Optional: true, + }, + "gather_info_schema_auto_inc": resource.BoolAttribute{ + Computed: true, + Description: "Gather auto_increment columns and max values from information schema.", + Optional: true, + }, + "gather_innodb_metrics": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", + Optional: true, + }, + "gather_perf_events_statements": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", + Optional: true, + }, + "gather_process_list": resource.BoolAttribute{ + Computed: true, + Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", + Optional: true, + }, + "gather_slave_status": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from SHOW SLAVE STATUS command output.", + Optional: true, + }, + "gather_table_io_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", + Optional: true, + }, + "gather_table_lock_waits": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", + Optional: true, + }, + "gather_table_schema": resource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", + Optional: true, + }, + "perf_events_statements_digest_text_limit": resource.Int64Attribute{ + Computed: true, + Description: "Truncates digest text from perf_events_statements into this many characters.", + Optional: true, + }, + "perf_events_statements_limit": resource.Int64Attribute{ + Computed: true, + Description: "Limits metrics from perf_events_statements.", + Optional: true, + }, 
+ "perf_events_statements_time_limit": resource.Int64Attribute{ + Computed: true, + Description: "Only include perf_events_statements whose last seen is less than this many seconds.", + Optional: true, + }, + }}, + }}}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// NewDataSourceSchema returns datasource schema +func NewDataSourceSchema() datasource.ListNestedBlock { + return datasource.ListNestedBlock{ + Description: "Integration user config", + NestedObject: datasource.NestedBlockObject{ + Attributes: map[string]datasource.Attribute{ + "database": datasource.StringAttribute{ + Computed: true, + Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + "retention_days": datasource.Int64Attribute{ + Computed: true, + Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.", + }, + "ro_username": datasource.StringAttribute{ + Computed: true, + Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + "username": datasource.StringAttribute{ + Computed: true, + Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", + }, + }, + Blocks: map[string]datasource.Block{"source_mysql": datasource.ListNestedBlock{ + Description: "Configuration options for metrics where source service is MySQL", + NestedObject: datasource.NestedBlockObject{Blocks: map[string]datasource.Block{"telegraf": datasource.ListNestedBlock{ + Description: "Configuration options for Telegraf MySQL input plugin", + NestedObject: datasource.NestedBlockObject{Attributes: map[string]datasource.Attribute{ + "gather_event_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", + }, + "gather_file_events_stats": datasource.BoolAttribute{ + Computed: true, + Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", + }, + "gather_index_io_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", + }, + "gather_info_schema_auto_inc": datasource.BoolAttribute{ + Computed: true, + Description: "Gather auto_increment columns and max values from information schema.", + }, + "gather_innodb_metrics": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", + }, + "gather_perf_events_statements": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", + }, + "gather_process_list": datasource.BoolAttribute{ + Computed: true, + Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", + }, + "gather_slave_status": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from SHOW SLAVE STATUS command output.", 
+ }, + "gather_table_io_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", + }, + "gather_table_lock_waits": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", + }, + "gather_table_schema": datasource.BoolAttribute{ + Computed: true, + Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", + }, + "perf_events_statements_digest_text_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Truncates digest text from perf_events_statements into this many characters.", + }, + "perf_events_statements_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Limits metrics from perf_events_statements.", + }, + "perf_events_statements_time_limit": datasource.Int64Attribute{ + Computed: true, + Description: "Only include perf_events_statements whose last seen is less than this many seconds.", + }, + }}, + }}}, + }}, + }, + Validators: []validator.List{listvalidator.SizeAtMost(1)}, + } +} + +// tfoUserConfig Integration user config +type tfoUserConfig struct { + Database types.String `tfsdk:"database"` + RetentionDays types.Int64 `tfsdk:"retention_days"` + RoUsername types.String `tfsdk:"ro_username"` + SourceMysql types.List `tfsdk:"source_mysql"` + Username types.String `tfsdk:"username"` +} + +// dtoUserConfig request/response object +type dtoUserConfig struct { + Database *string `groups:"create,update" json:"database,omitempty"` + RetentionDays *int64 `groups:"create,update" json:"retention_days,omitempty"` + RoUsername *string `groups:"create,update" json:"ro_username,omitempty"` + SourceMysql *dtoSourceMysql `groups:"create,update" json:"source_mysql,omitempty"` + Username *string `groups:"create,update" json:"username,omitempty"` +} + +// expandUserConfig expands tf object into dto object +func expandUserConfig(ctx context.Context, diags *diag.Diagnostics, o *tfoUserConfig) *dtoUserConfig { + sourceMysqlVar := schemautil.ExpandListBlockNested[tfoSourceMysql, dtoSourceMysql](ctx, diags, expandSourceMysql, o.SourceMysql) + if diags.HasError() { + return nil + } + return &dtoUserConfig{ + Database: schemautil.ValueStringPointer(o.Database), + RetentionDays: schemautil.ValueInt64Pointer(o.RetentionDays), + RoUsername: schemautil.ValueStringPointer(o.RoUsername), + SourceMysql: sourceMysqlVar, + Username: schemautil.ValueStringPointer(o.Username), + } +} + +// flattenUserConfig flattens dto object into tf object +func flattenUserConfig(ctx context.Context, diags *diag.Diagnostics, o *dtoUserConfig) *tfoUserConfig { + sourceMysqlVar := schemautil.FlattenListBlockNested[dtoSourceMysql, tfoSourceMysql](ctx, diags, flattenSourceMysql, sourceMysqlAttrs, o.SourceMysql) + if diags.HasError() { + return nil + } + return &tfoUserConfig{ + Database: types.StringPointerValue(o.Database), + RetentionDays: types.Int64PointerValue(o.RetentionDays), + RoUsername: types.StringPointerValue(o.RoUsername), + SourceMysql: sourceMysqlVar, + Username: types.StringPointerValue(o.Username), + } +} + +var userConfigAttrs = map[string]attr.Type{ + "database": types.StringType, + "retention_days": types.Int64Type, + "ro_username": types.StringType, + "source_mysql": types.ListType{ElemType: types.ObjectType{AttrTypes: sourceMysqlAttrs}}, + "username": types.StringType, +} + +// tfoSourceMysql Configuration options for metrics where source service is MySQL +type tfoSourceMysql struct { + Telegraf types.List `tfsdk:"telegraf"` +} + +// 
dtoSourceMysql request/response object +type dtoSourceMysql struct { + Telegraf *dtoTelegraf `groups:"create,update" json:"telegraf,omitempty"` +} + +// expandSourceMysql expands tf object into dto object +func expandSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *tfoSourceMysql) *dtoSourceMysql { + telegrafVar := schemautil.ExpandListBlockNested[tfoTelegraf, dtoTelegraf](ctx, diags, expandTelegraf, o.Telegraf) + if diags.HasError() { + return nil + } + return &dtoSourceMysql{Telegraf: telegrafVar} +} + +// flattenSourceMysql flattens dto object into tf object +func flattenSourceMysql(ctx context.Context, diags *diag.Diagnostics, o *dtoSourceMysql) *tfoSourceMysql { + telegrafVar := schemautil.FlattenListBlockNested[dtoTelegraf, tfoTelegraf](ctx, diags, flattenTelegraf, telegrafAttrs, o.Telegraf) + if diags.HasError() { + return nil + } + return &tfoSourceMysql{Telegraf: telegrafVar} +} + +var sourceMysqlAttrs = map[string]attr.Type{"telegraf": types.ListType{ElemType: types.ObjectType{AttrTypes: telegrafAttrs}}} + +// tfoTelegraf Configuration options for Telegraf MySQL input plugin +type tfoTelegraf struct { + GatherEventWaits types.Bool `tfsdk:"gather_event_waits"` + GatherFileEventsStats types.Bool `tfsdk:"gather_file_events_stats"` + GatherIndexIoWaits types.Bool `tfsdk:"gather_index_io_waits"` + GatherInfoSchemaAutoInc types.Bool `tfsdk:"gather_info_schema_auto_inc"` + GatherInnodbMetrics types.Bool `tfsdk:"gather_innodb_metrics"` + GatherPerfEventsStatements types.Bool `tfsdk:"gather_perf_events_statements"` + GatherProcessList types.Bool `tfsdk:"gather_process_list"` + GatherSlaveStatus types.Bool `tfsdk:"gather_slave_status"` + GatherTableIoWaits types.Bool `tfsdk:"gather_table_io_waits"` + GatherTableLockWaits types.Bool `tfsdk:"gather_table_lock_waits"` + GatherTableSchema types.Bool `tfsdk:"gather_table_schema"` + PerfEventsStatementsDigestTextLimit types.Int64 `tfsdk:"perf_events_statements_digest_text_limit"` + PerfEventsStatementsLimit types.Int64 `tfsdk:"perf_events_statements_limit"` + PerfEventsStatementsTimeLimit types.Int64 `tfsdk:"perf_events_statements_time_limit"` +} + +// dtoTelegraf request/response object +type dtoTelegraf struct { + GatherEventWaits *bool `groups:"create,update" json:"gather_event_waits,omitempty"` + GatherFileEventsStats *bool `groups:"create,update" json:"gather_file_events_stats,omitempty"` + GatherIndexIoWaits *bool `groups:"create,update" json:"gather_index_io_waits,omitempty"` + GatherInfoSchemaAutoInc *bool `groups:"create,update" json:"gather_info_schema_auto_inc,omitempty"` + GatherInnodbMetrics *bool `groups:"create,update" json:"gather_innodb_metrics,omitempty"` + GatherPerfEventsStatements *bool `groups:"create,update" json:"gather_perf_events_statements,omitempty"` + GatherProcessList *bool `groups:"create,update" json:"gather_process_list,omitempty"` + GatherSlaveStatus *bool `groups:"create,update" json:"gather_slave_status,omitempty"` + GatherTableIoWaits *bool `groups:"create,update" json:"gather_table_io_waits,omitempty"` + GatherTableLockWaits *bool `groups:"create,update" json:"gather_table_lock_waits,omitempty"` + GatherTableSchema *bool `groups:"create,update" json:"gather_table_schema,omitempty"` + PerfEventsStatementsDigestTextLimit *int64 `groups:"create,update" json:"perf_events_statements_digest_text_limit,omitempty"` + PerfEventsStatementsLimit *int64 `groups:"create,update" json:"perf_events_statements_limit,omitempty"` + PerfEventsStatementsTimeLimit *int64 `groups:"create,update" 
json:"perf_events_statements_time_limit,omitempty"` +} + +// expandTelegraf expands tf object into dto object +func expandTelegraf(ctx context.Context, diags *diag.Diagnostics, o *tfoTelegraf) *dtoTelegraf { + return &dtoTelegraf{ + GatherEventWaits: schemautil.ValueBoolPointer(o.GatherEventWaits), + GatherFileEventsStats: schemautil.ValueBoolPointer(o.GatherFileEventsStats), + GatherIndexIoWaits: schemautil.ValueBoolPointer(o.GatherIndexIoWaits), + GatherInfoSchemaAutoInc: schemautil.ValueBoolPointer(o.GatherInfoSchemaAutoInc), + GatherInnodbMetrics: schemautil.ValueBoolPointer(o.GatherInnodbMetrics), + GatherPerfEventsStatements: schemautil.ValueBoolPointer(o.GatherPerfEventsStatements), + GatherProcessList: schemautil.ValueBoolPointer(o.GatherProcessList), + GatherSlaveStatus: schemautil.ValueBoolPointer(o.GatherSlaveStatus), + GatherTableIoWaits: schemautil.ValueBoolPointer(o.GatherTableIoWaits), + GatherTableLockWaits: schemautil.ValueBoolPointer(o.GatherTableLockWaits), + GatherTableSchema: schemautil.ValueBoolPointer(o.GatherTableSchema), + PerfEventsStatementsDigestTextLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsDigestTextLimit), + PerfEventsStatementsLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsLimit), + PerfEventsStatementsTimeLimit: schemautil.ValueInt64Pointer(o.PerfEventsStatementsTimeLimit), + } +} + +// flattenTelegraf flattens dto object into tf object +func flattenTelegraf(ctx context.Context, diags *diag.Diagnostics, o *dtoTelegraf) *tfoTelegraf { + return &tfoTelegraf{ + GatherEventWaits: types.BoolPointerValue(o.GatherEventWaits), + GatherFileEventsStats: types.BoolPointerValue(o.GatherFileEventsStats), + GatherIndexIoWaits: types.BoolPointerValue(o.GatherIndexIoWaits), + GatherInfoSchemaAutoInc: types.BoolPointerValue(o.GatherInfoSchemaAutoInc), + GatherInnodbMetrics: types.BoolPointerValue(o.GatherInnodbMetrics), + GatherPerfEventsStatements: types.BoolPointerValue(o.GatherPerfEventsStatements), + GatherProcessList: types.BoolPointerValue(o.GatherProcessList), + GatherSlaveStatus: types.BoolPointerValue(o.GatherSlaveStatus), + GatherTableIoWaits: types.BoolPointerValue(o.GatherTableIoWaits), + GatherTableLockWaits: types.BoolPointerValue(o.GatherTableLockWaits), + GatherTableSchema: types.BoolPointerValue(o.GatherTableSchema), + PerfEventsStatementsDigestTextLimit: types.Int64PointerValue(o.PerfEventsStatementsDigestTextLimit), + PerfEventsStatementsLimit: types.Int64PointerValue(o.PerfEventsStatementsLimit), + PerfEventsStatementsTimeLimit: types.Int64PointerValue(o.PerfEventsStatementsTimeLimit), + } +} + +var telegrafAttrs = map[string]attr.Type{ + "gather_event_waits": types.BoolType, + "gather_file_events_stats": types.BoolType, + "gather_index_io_waits": types.BoolType, + "gather_info_schema_auto_inc": types.BoolType, + "gather_innodb_metrics": types.BoolType, + "gather_perf_events_statements": types.BoolType, + "gather_process_list": types.BoolType, + "gather_slave_status": types.BoolType, + "gather_table_io_waits": types.BoolType, + "gather_table_lock_waits": types.BoolType, + "gather_table_schema": types.BoolType, + "perf_events_statements_digest_text_limit": types.Int64Type, + "perf_events_statements_limit": types.Int64Type, + "perf_events_statements_time_limit": types.Int64Type, +} + +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, 
expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} diff --git a/internal/plugin/service/userconfig/integration/metrics/metrics_test.go b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go new file mode 100644 index 000000000..07c3e6b2f --- /dev/null +++ b/internal/plugin/service/userconfig/integration/metrics/metrics_test.go @@ -0,0 +1,114 @@ +// Code generated by user config generator. DO NOT EDIT. + +package metrics + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = `{ + "database": "foo", + "retention_days": 1, + "ro_username": "foo", + "source_mysql": { + "telegraf": { + "gather_event_waits": true, + "gather_file_events_stats": true, + "gather_index_io_waits": true, + "gather_info_schema_auto_inc": true, + "gather_innodb_metrics": true, + "gather_perf_events_statements": true, + "gather_process_list": true, + "gather_slave_status": true, + "gather_table_io_waits": true, + "gather_table_lock_waits": true, + "gather_table_schema": true, + "perf_events_statements_digest_text_limit": 1, + "perf_events_statements_limit": 1, + "perf_events_statements_time_limit": 1 + } + }, + "username": "foo" +}` +const updateOnlyFields = `{ + "database": "foo", + "retention_days": 1, + "ro_username": "foo", + "source_mysql": { + "telegraf": { + "gather_event_waits": true, + "gather_file_events_stats": true, + "gather_index_io_waits": true, + "gather_info_schema_auto_inc": true, + "gather_innodb_metrics": true, + "gather_perf_events_statements": true, + "gather_process_list": true, + "gather_slave_status": true, + "gather_table_io_waits": true, + "gather_table_lock_waits": true, + "gather_table_schema": true, + "perf_events_statements_digest_text_limit": 1, + "perf_events_statements_limit": 1, + "perf_events_statements_time_limit": 1 + } + }, + "username": "foo" +}` + +func TestUserConfig(t *testing.T) { + cases := []struct { + name string + source string + expect string + marshal func(any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dtoUserConfig) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flattenUserConfig(ctx, diags, dto) + require.Empty(t, diags) + + // From TF to json + config := expandUserConfig(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is 
strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} diff --git a/internal/plugin/util/schema.go b/internal/plugin/util/schema.go index 4d41a64fb..388335f5a 100644 --- a/internal/plugin/util/schema.go +++ b/internal/plugin/util/schema.go @@ -10,14 +10,16 @@ import ( // GeneralizeSchema is a function that generalizes the schema by adding the common definitions to the schema. func GeneralizeSchema(ctx context.Context, s schema.Schema) schema.Schema { - s.Blocks = map[string]schema.Block{ - "timeouts": timeouts.Block(ctx, timeouts.Opts{ - Create: true, - Read: true, - Update: true, - Delete: true, - }), + if s.Blocks == nil { + s.Blocks = make(map[string]schema.Block) } + s.Blocks["timeouts"] = timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Read: true, + Update: true, + Delete: true, + }) + return s } diff --git a/internal/plugin/util/wait.go b/internal/plugin/util/wait.go new file mode 100644 index 000000000..a4393d46c --- /dev/null +++ b/internal/plugin/util/wait.go @@ -0,0 +1,20 @@ +package util + +import ( + "context" + "time" + + "github.com/avast/retry-go" +) + +// WaitActive waits for resource activity (for example) +// Top timeout comes from the context, no need to parse timeouts from the object. +// But eventually (attempts + connection timeout) * delay makes less timeout than we usually use (20 minutes or more) +func WaitActive(ctx context.Context, retryableFunc retry.RetryableFunc) error { + return retry.Do( + retryableFunc, + retry.Context(ctx), + retry.Attempts(10), + retry.Delay(2*time.Second), + ) +} diff --git a/internal/schemautil/plugin.go b/internal/schemautil/plugin.go new file mode 100644 index 000000000..b06dd8635 --- /dev/null +++ b/internal/schemautil/plugin.go @@ -0,0 +1,153 @@ +package schemautil + +import ( + "context" + "encoding/json" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/liip/sheriff" +) + +func ExpandList[T any](ctx context.Context, diags *diag.Diagnostics, list types.List) (items []T) { + if list.IsUnknown() || list.IsNull() { + return nil + } + diags.Append(list.ElementsAs(ctx, &items, false)...) 
+ return items +} + +type Expander[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K + +func ExpandListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) []*K { + expanded := ExpandList[T](ctx, diags, list) + if expanded == nil || diags.HasError() { + return nil + } + + items := make([]*K, 0, len(expanded)) + for _, v := range expanded { + items = append(items, expand(ctx, diags, &v)) + if diags.HasError() { + return make([]*K, 0) + } + } + return items +} + +func ExpandListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, expand Expander[T, K], list types.List) *K { + items := ExpandListNested(ctx, diags, expand, list) + if len(items) == 0 { + return nil + } + return items[0] +} + +type Flattener[T, K any] func(ctx context.Context, diags *diag.Diagnostics, o *T) *K + +func FlattenListNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, list []*T) types.List { + oType := types.ObjectType{AttrTypes: attrs} + empty := types.ListValueMust(oType, []attr.Value{}) + items := make([]*K, 0, len(list)) + for _, v := range list { + items = append(items, flatten(ctx, diags, v)) + if diags.HasError() { + return empty + } + } + + result, d := types.ListValueFrom(ctx, oType, items) + diags.Append(d...) + if diags.HasError() { + return empty + } + return result +} + +func FlattenListBlockNested[T, K any](ctx context.Context, diags *diag.Diagnostics, flatten Flattener[T, K], attrs map[string]attr.Type, o *T) types.List { + if o == nil { + return types.ListValueMust(types.ObjectType{AttrTypes: attrs}, []attr.Value{}) + } + return FlattenListNested(ctx, diags, flatten, attrs, []*T{o}) +} + +// marshalUserConfig converts user config into json +func marshalUserConfig(c any, groups ...string) (map[string]any, error) { + if c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) { + return nil, nil + } + + o := &sheriff.Options{ + Groups: groups, + } + + i, err := sheriff.Marshal(o, c) + if err != nil { + return nil, err + } + + m, ok := i.(map[string]any) + if !ok { + // It is an empty pointer + // sheriff just returned the very same object + return nil, nil + } + + return m, nil +} + +// MarshalCreateUserConfig returns marshaled user config for Create operation +func MarshalCreateUserConfig(c any) (map[string]any, error) { + return marshalUserConfig(c, "create", "update") +} + +// MarshalUpdateUserConfig returns marshaled user config for Update operation +func MarshalUpdateUserConfig(c any) (map[string]any, error) { + return marshalUserConfig(c, "update") +} + +func MapToDTO(src map[string]any, dst any) error { + b, err := json.Marshal(&src) + if err != nil { + return err + } + return json.Unmarshal(b, dst) +} + +// ValueStringPointer checks for "unknown" +// Returns nil instead of zero value +func ValueStringPointer(v types.String) *string { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueStringPointer() +} + +// ValueBoolPointer checks for "unknown" +// Returns nil instead of zero value +func ValueBoolPointer(v types.Bool) *bool { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueBoolPointer() +} + +// ValueInt64Pointer checks for "unknown" +// Returns nil instead of zero value +func ValueInt64Pointer(v types.Int64) *int64 { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueInt64Pointer() +} + +// ValueFloat64Pointer checks for "unknown" +// Returns nil instead of zero 
value +func ValueFloat64Pointer(v types.Float64) *float64 { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueFloat64Pointer() +} diff --git a/internal/sdkprovider/provider/provider.go b/internal/sdkprovider/provider/provider.go index 1628932b3..4101328c3 100644 --- a/internal/sdkprovider/provider/provider.go +++ b/internal/sdkprovider/provider/provider.go @@ -104,7 +104,6 @@ func Provider(version string) *schema.Provider { "aiven_transit_gateway_vpc_attachment": vpc.DatasourceTransitGatewayVPCAttachment(), // service integrations - "aiven_service_integration": serviceintegration.DatasourceServiceIntegration(), "aiven_service_integration_endpoint": serviceintegration.DatasourceServiceIntegrationEndpoint(), // m3db @@ -203,7 +202,7 @@ func Provider(version string) *schema.Provider { "aiven_transit_gateway_vpc_attachment": vpc.ResourceTransitGatewayVPCAttachment(), // service integrations - "aiven_service_integration": serviceintegration.ResourceServiceIntegration(), + //"aiven_service_integration": serviceintegration.ResourceServiceIntegration(), "aiven_service_integration_endpoint": serviceintegration.ResourceServiceIntegrationEndpoint(), // m3db diff --git a/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go b/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go index e2465684e..39e502bad 100644 --- a/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go +++ b/internal/sdkprovider/service/kafkatopic/kafka_topic_cache.go @@ -62,8 +62,6 @@ func (t *kafkaTopicCache) LoadByTopicName(projectName, serviceName, topicName st result.State = "CONFIGURING" } - log.Printf("[TRACE] retrieving from a topic cache `%+#v` for a topic name `%s`", result, topicName) - return result, ok } diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_test.go b/internal/sdkprovider/service/serviceintegration/service_integration_test.go index a1ac5b75b..0284a2e20 100644 --- a/internal/sdkprovider/service/serviceintegration/service_integration_test.go +++ b/internal/sdkprovider/service/serviceintegration/service_integration_test.go @@ -24,7 +24,7 @@ func TestAccAivenServiceIntegration_should_fail(t *testing.T) { { Config: testAccServiceIntegrationShouldFailResource(), PlanOnly: true, - ExpectError: regexp.MustCompile("endpoint id should have the following format: project_name/endpoint_id"), + ExpectError: regexp.MustCompile("endpoint id should have the following"), }, }, }) diff --git a/main.go b/main.go index d82346f09..3d013145e 100644 --- a/main.go +++ b/main.go @@ -12,6 +12,7 @@ import ( ) //go:generate go test -tags userconfig ./internal/schemautil/userconfig +//go:generate go run ./ucgenerator/... --integrations clickhouse_kafka,clickhouse_postgresql,datadog,external_aws_cloudwatch_metrics,kafka_connect,kafka_logs,kafka_mirrormaker,logs,metrics // version is the version of the provider. 
var version = "dev" diff --git a/ucgenerator/main.go b/ucgenerator/main.go new file mode 100644 index 000000000..bd052bbf0 --- /dev/null +++ b/ucgenerator/main.go @@ -0,0 +1,598 @@ +package main + +import ( + "flag" + "fmt" + "go/format" + "log" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/aiven/go-api-schemas/pkg/dist" + "github.com/dave/jennifer/jen" + "golang.org/x/exp/slices" + "golang.org/x/tools/imports" + "gopkg.in/yaml.v3" +) + +const ( + destPath = "./internal/plugin/service/userconfig/" + localPrefix = "github.com/aiven/terraform-provider-aiven" + importDiag = "github.com/hashicorp/terraform-plugin-framework/diag" + importTypes = "github.com/hashicorp/terraform-plugin-framework/types" + importAttr = "github.com/hashicorp/terraform-plugin-framework/attr" + importSchemautil = "github.com/aiven/terraform-provider-aiven/internal/schemautil" + importResourceSchema = "github.com/hashicorp/terraform-plugin-framework/resource/schema" + importDatasourceSchema = "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + importListvalidator = "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + importValidator = "github.com/hashicorp/terraform-plugin-framework/schema/validator" + codeGenerated = "Code generated by user config generator. DO NOT EDIT." +) + +func main() { + var serviceList, integrationList string + flag.StringVar(&serviceList, "services", "", "Comma separated service list of names to generate for") + flag.StringVar(&integrationList, "integrations", "", "Comma separated integrations list of names to generate for") + flag.Parse() + + if serviceList+integrationList == "" { + log.Fatal("--service or --integrations must be provided") + } + + if serviceList != "" { + err := generate("service", dist.ServiceTypes, strings.Split(serviceList, ",")) + if err != nil { + log.Fatal(err) + } + } + + if integrationList != "" { + err := generate("integration", dist.IntegrationTypes, strings.Split(integrationList, ",")) + if err != nil { + log.Fatal(err) + } + } +} + +func generate(kind string, data []byte, keys []string) error { + var root map[string]*object + + err := yaml.Unmarshal(data, &root) + if err != nil { + return err + } + + for key, o := range root { + if !slices.Contains(keys, key) { + continue + } + + pkgName := strings.ReplaceAll(key, "_", "") + o.isRoot = true + o.init("UserConfig") + + // Generates file + f := jen.NewFile(pkgName) + f.HeaderComment(codeGenerated) + f.ImportAlias(importResourceSchema, "resource") + f.ImportAlias(importDatasourceSchema, "datasource") + genAllForObject(f, o) + + // Sorts imports + imports.LocalPrefix = localPrefix + b, err := imports.Process("", []byte(f.GoString()), nil) + if err != nil { + return err + } + + // Saves file + dirPath := filepath.Join(destPath, kind, pkgName) + err = os.MkdirAll(dirPath, os.ModePerm) + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(dirPath, key+".go"), b, 0644) + if err != nil { + return err + } + + testFile, err := genTestFile(pkgName, o) + if err != nil { + return err + } + + testFileByte, err := format.Source([]byte(testFile)) + if err != nil { + return err + } + + err = os.WriteFile(filepath.Join(dirPath, key+"_test.go"), testFileByte, 0644) + if err != nil { + return err + } + } + return nil +} + +func genAllForObject(f *jen.File, o *object) { + genSchema(f, o, "Resource", importResourceSchema) + genSchema(f, o, "DataSource", importDatasourceSchema) + genTFObject(f, o) + genDTOObject(f, o) + genExpander(f, o) + genFlattener(f, o) + 
genAttrsMap(f, o) + + for _, p := range o.properties { + if p.isNestedBlock() { + if p.Type == objectTypeArray { + genAllForObject(f, p.ArrayItems) + } else { + genAllForObject(f, p) + } + } + } + + if !o.isRoot { + return + } + + // Exports handy public functions for root object only + f.Op(` +// Expand public function that converts tf object into dto +func Expand(ctx context.Context, diags *diag.Diagnostics, list types.List) *dtoUserConfig { + return schemautil.ExpandListBlockNested[tfoUserConfig, dtoUserConfig](ctx, diags, expandUserConfig, list) +} + +// Flatten public function that converts dto into tf object +func Flatten(ctx context.Context, diags *diag.Diagnostics, m map[string]any) types.List { + o := new(dtoUserConfig) + err := schemautil.MapToDTO(m, o) + if err != nil { + diags.AddError("failed to marshal map user config to dto", err.Error()) + return types.ListNull(types.ObjectType{AttrTypes: userConfigAttrs}) + } + return schemautil.FlattenListBlockNested[dtoUserConfig, tfoUserConfig](ctx, diags, flattenUserConfig, userConfigAttrs, o) +} +`) +} + +// genExpander creates function that unwraps TF object into json +func genExpander(f *jen.File, o *object) { + body := make([]jen.Code, 0) + props := jen.Dict{} + for _, p := range o.properties { + var value *jen.Statement + switch p.Type { + case objectTypeObject: + value = jen.Op(p.varName) + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListBlockNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("expand"+p.camelName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + case objectTypeArray: + value = jen.Op(p.varName) + if p.ArrayItems.Type == objectTypeObject { + // It is a list of objects + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandListNested").Types(jen.Id(p.tfoStructName), jen.Id(p.dtoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("expand"+p.camelName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } else { + // It is a list of scalars + // We don't want pointer scalars here + t := strings.ReplaceAll(getDTOType(p.ArrayItems), "*", "") + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "ExpandList").Types(jen.Id(t)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } + default: + if p.Required { + value = jen.Id("o").Dot(p.camelName).Dot(getTFTypeToValue(p)).Call() + } else { + // Own functions for casting values + value = jen.Qual(importSchemautil, getTFTypeToValue(p)).Call(jen.Id("o").Dot(p.camelName)) + } + } + + props[jen.Id(p.camelName)] = value + } + + // Function body + return statement + body = append( + body, + jen.Return(jen.Id("&"+o.dtoStructName).Values(props)), + ) + + funcName := "expand" + o.camelName + f.Comment(funcName + " expands tf object into dto object") + f.Func().Id(funcName).Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), + jen.Id("o").Op("*"+o.tfoStructName), + ).Id("*" + o.dtoStructName).Block(body...) 
+} + +// genFlattener creates function that unwraps json into TF object +func genFlattener(f *jen.File, o *object) { + body := make([]jen.Code, 0) + props := jen.Dict{} + for _, p := range o.properties { + var value *jen.Statement + switch p.Type { + case objectTypeObject: + value = jen.Op(p.varName) + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListBlockNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("flatten"+p.camelName), + jen.Id(p.attrsName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + case objectTypeArray: + value = jen.Op(p.varName) + if p.ArrayItems.Type == objectTypeObject { + // It is a list of objects + v := jen.Id(p.varName).Op(":=").Qual(importSchemautil, "FlattenListNested").Types(jen.Id(p.dtoStructName), jen.Id(p.tfoStructName)).Call( + jen.Id("ctx"), + jen.Id("diags"), + jen.Id("flatten"+p.camelName), + jen.Id(p.attrsName), + jen.Id("o").Dot(p.camelName), + ) + body = append(body, v, ifErr()) + } else { + //It is a list of scalars + v := jen.List(jen.Id(p.varName), jen.Id("d")).Op(":=").Qual(importTypes, "ListValueFrom").Call( + jen.Id("ctx"), + jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type"), + jen.Id("o").Dot(p.camelName), + ) + body = append( + body, + v, + jen.Id("diags").Dot("Append").Call(jen.Id("d").Op("...")), + ifErr(), + ) + } + default: + value = jen.Qual(importTypes, getTFTypeFromValue(p)).Call(jen.Id("o").Dot(p.camelName)) + } + + if value == nil { + continue + } + + props[jen.Id(p.camelName)] = value + } + + // Function body + return statement + body = append( + body, + jen.Return(jen.Id("&"+o.tfoStructName).Values(props)), + ) + + funcName := "flatten" + o.camelName + f.Comment(funcName + " flattens dto object into tf object") + f.Func().Id(funcName).Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id("diags").Op("*").Qual(importDiag, "Diagnostics"), + jen.Id("o").Op("*"+o.dtoStructName), + ).Id("*" + o.tfoStructName).Block(body...) +} + +// genAttrsMap creates attributes map for Flatten functions to "unwrap" response json into TF object +func genAttrsMap(f *jen.File, o *object) { + values := jen.Dict{} + for _, p := range o.properties { + key := jen.Lit(p.tfName) + switch p.Type { + case objectTypeArray, objectTypeObject: + var v jen.Code + if p.isNestedBlock() { + v = jen.Qual(importTypes, "ObjectType").Values(jen.Dict{ + jen.Id("AttrTypes"): jen.Id(p.attrsName), + }) + } else { + v = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") + } + values[key] = jen.Qual(importTypes, "ListType").Values(jen.Dict{jen.Id("ElemType"): v}) + default: + values[key] = jen.Qual(importTypes, getTFType(p)+"Type") + } + } + f.Var().Id(o.attrsName).Op("=").Map(jen.String()).Qual(importAttr, "Type").Values(values) +} + +// genTFObject creates TF object (for plan) +func genTFObject(f *jen.File, o *object) { + fields := make([]jen.Code, 0) + for _, p := range o.properties { + fields = append(fields, jen.Id(p.camelName).Qual(importTypes, getTFType(p)).Tag(map[string]string{"tfsdk": p.tfName})) + } + f.Comment(fmt.Sprintf("%s %s", o.tfoStructName, getDescription(o))) + f.Type().Id(o.tfoStructName).Struct(fields...) 
+} + +// genDTOObject creates DTO object to send over HTTP +func genDTOObject(f *jen.File, o *object) { + fields := make([]jen.Code, 0) + for _, p := range o.properties { + tags := map[string]string{"json": p.jsonName, "groups": "create"} + if !p.Required { + tags["json"] += ",omitempty" + } + if !p.CreateOnly { + tags["groups"] += ",update" + } + fields = append(fields, jen.Id(p.camelName).Id(getDTOType(p)).Tag(tags)) + } + f.Comment(o.dtoStructName + " request/response object") + f.Type().Id(o.dtoStructName).Struct(fields...) +} + +// genSchema generates TF schema. For root object only, i.e. RedisUserConfig +func genSchema(f *jen.File, o *object, name, pkg string) { + if !o.isRoot { + return + } + + funcName := fmt.Sprintf("New%sSchema", name) + f.Comment(fmt.Sprintf("%s returns %s schema", funcName, strings.ToLower(name))) + f.Func().Id(funcName).Params().Qual(pkg, "ListNestedBlock").Block( + jen.Return(getSchemaAttributes(o, pkg)), + ) +} + +func getSchemaAttributes(o *object, pkg string) jen.Code { + isResource := pkg == importResourceSchema + blocks := jen.Dict{} + attribs := jen.Dict{} + + // Array properties are its item properties + properties := o.properties + if o.Type == objectTypeArray { + properties = o.ArrayItems.properties + } + + for _, p := range properties { + key := jen.Lit(p.tfName) + if p.isNestedBlock() { + blocks[key] = getSchemaAttributes(p, pkg) + } else { + // For scalars + var value *jen.Statement + switch p.Type { + case objectTypeObject: + // Schemaless map + //value = jen.Qual(importTypes, "StringType") + panic("schemaless objects are not supported") + case objectTypeArray: + value = jen.Qual(importTypes, getTFType(p.ArrayItems)+"Type") + } + + values := getSchemaAttributeValues(p, isResource) + values[jen.Id("ElementType")] = value + attribs[jen.Lit(p.tfName)] = jen.Qual(pkg, getTFType(p)+"Attribute").Values(values) + } + } + + nested := jen.Dict{} + if len(blocks) > 0 { + nested[jen.Id("Blocks")] = jen.Map(jen.String()).Qual(pkg, "Block").Values(blocks) + } + + if len(attribs) > 0 { + nested[jen.Id("Attributes")] = jen.Map(jen.String()).Qual(pkg, "Attribute").Values(attribs) + } + + values := getSchemaAttributeValues(o, isResource) + values[jen.Id("NestedObject")] = jen.Qual(pkg, "NestedBlockObject").Values(nested) + return jen.Qual(pkg, "ListNestedBlock").Values(values) +} + +func getSchemaAttributeValues(o *object, isResource bool) jen.Dict { + a := jen.Dict{} + + if d := getDescription(o); d != "" { + a[jen.Id("Description")] = jen.Lit(d) + } + + if o.IsDeprecated { + a[jen.Id("DeprecationMessage")] = jen.Lit(fmt.Sprintf("%q is deprecated", o.tfName)) + } + + validators := make([]jen.Code, 0) + if o.MinItems != nil { + validators = append(validators, valSizeAtLeast(*o.MinItems)) + } + + if o.MaxItems != nil { + validators = append(validators, valSizeAtMost(*o.MaxItems)) + } + + if !o.isNestedBlock() { + if !isResource { + a[jen.Id("Computed")] = jen.True() + } else { + if o.Required { + a[jen.Id("Required")] = jen.True() + } else { + a[jen.Id("Computed")] = jen.True() + a[jen.Id("Optional")] = jen.True() + + if o.Default != nil { + a[jen.Id("Default")] = getStaticDefault(o) + } + } + } + } + + if len(validators) > 0 { + a[jen.Id("Validators")] = valValidatorList(validators...) 
+ } + + return a +} + +// getTFType matches generator types into plugin types +func getTFType(o *object) string { + switch o.Type { + case objectTypeObject: + if o.isNestedBlock() { + return "List" + } + return "Map" + case objectTypeArray: + return "List" + case objectTypeString: + return "String" + case objectTypeBoolean: + return "Bool" + case objectTypeInteger: + return "Int64" + case objectTypeNumber: + return "Float64" + } + panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) +} + +func getTFTypeToValue(o *object) string { + v := getTFType(o) + if !o.Required { + return fmt.Sprintf("Value%sPointer", v) + } + return "Value" + v +} + +func getTFTypeFromValue(o *object) string { + v := getTFType(o) + if !o.Required { + return v + "PointerValue" + } + return v + "Value" +} + +func getDTOType(o *object) string { + optional := "*" + if o.Required { + optional = "" + } + + switch o.Type { + case objectTypeObject: + return "*" + o.dtoStructName + case objectTypeArray: + t := "[]" + getDTOType(o.ArrayItems) + if o.ArrayItems.Type == objectTypeObject { + return t + } + // We don't want pointer scalars in slice + return strings.ReplaceAll(t, "*", "") + case objectTypeString: + return optional + "string" + case objectTypeBoolean: + return optional + "bool" + case objectTypeInteger: + return optional + "int64" + case objectTypeNumber: + return optional + "float64" + } + panic(fmt.Sprintf("Unknown type for %q", o.jsonName)) +} + +// getStaticDefault returns "default" value for given field +func getStaticDefault(o *object) *jen.Statement { + var v *jen.Statement + switch o.Type { + case objectTypeString: + v = jen.Lit(o.Default.(string)) + case objectTypeInteger: + d, err := strconv.Atoi(o.Default.(string)) + if err != nil { + return nil + } + v = jen.Lit(d) + case objectTypeNumber: + v = jen.Lit(o.Default.(float64)) + case objectTypeBoolean: + v = jen.Lit(o.Default.(bool)) + default: + return nil + } + d := getTFType(o) + i := fmt.Sprintf("%s/%sdefault", importResourceSchema, strings.ToLower(d)) + return jen.Qual(i, "Static"+d).Call(v) +} + +func getDescription(o *object) string { + desc := make([]string, 0) + d := o.Description + if len(d) < len(o.Title) { + d = o.Title + } + + if d != "" { + desc = append(desc, addDot(d)) + } + + if o.Default != nil && o.Type != objectTypeArray { + desc = append(desc, fmt.Sprintf("The default value is `%v`.", o.Default)) + } + + // Trims dot from description, so it doesn't look weird with link to nested schema + // Example: Databases to expose[dot] (see [below for nested schema]...) + if len(desc) == 1 && o.isNestedBlock() { + return strings.Trim(desc[0], ".") + } + + return strings.Join(desc, " ") +} + +func addDot(s string) string { + if s != "" { + switch s[len(s)-1:] { + case ".", "!", "?": + default: + s += "." + } + } + return s +} + +func getValidator(name string, v any) *jen.Statement { + return jen.Qual(importListvalidator, name).Call(jen.Lit(v)) +} + +func valSizeAtLeast(n int) *jen.Statement { + return getValidator("SizeAtLeast", n) +} + +func valSizeAtMost(n int) *jen.Statement { + return getValidator("SizeAtMost", n) +} + +func valValidatorList(c ...jen.Code) *jen.Statement { + return jen.Index().Qual(importValidator, "List").Values(c...) 
+} + +func ifErr() *jen.Statement { + return jen.If(jen.Id("diags").Dot("HasError").Call()).Block(jen.Return(jen.Nil())) +} + +func toPtr[T any](v T) *T { + return &v +} diff --git a/ucgenerator/models.go b/ucgenerator/models.go new file mode 100644 index 000000000..2b6942269 --- /dev/null +++ b/ucgenerator/models.go @@ -0,0 +1,142 @@ +package main + +import ( + "strings" + + "github.com/stoewer/go-strcase" + "golang.org/x/exp/slices" +) + +type objectType string + +const ( + objectTypeObject objectType = "object" + objectTypeArray objectType = "array" + objectTypeString objectType = "string" + objectTypeBoolean objectType = "boolean" + objectTypeInteger objectType = "integer" + objectTypeNumber objectType = "number" +) + +type object struct { + isRoot bool // top level object + jsonName string // original name from json spec + tfName string // terraform manifest field, unlike jsonName, can't store dot symbol + tfoStructName string + dtoStructName string + camelName string + varName string + attrsName string + properties []*object + parent *object + + Type objectType `yaml:"-"` + Required bool `yaml:"-"` + + IsDeprecated bool `yaml:"is_deprecated"` + Default any `yaml:"default"` + Enum []*struct { + Value string `yaml:"value"` + IsDeprecated bool `yaml:"is_deprecated"` + } `yaml:"enum"` + Pattern string `yaml:"pattern"` + MinItems *int `yaml:"min_items"` + MaxItems *int `yaml:"max_items"` + MinLength *int `yaml:"min_length"` + MaxLength *int `yaml:"max_length"` + Minimum *float64 `yaml:"minimum"` + Maximum *float64 `yaml:"maximum"` + OrigType any `yaml:"type"` + Format string `yaml:"format"` + Title string `yaml:"title"` + Description string `yaml:"description"` + Properties map[string]*object `yaml:"properties"` + ArrayItems *object `yaml:"items"` + RequiredFields []string `yaml:"required"` + CreateOnly bool `yaml:"create_only"` + Nullable bool `yaml:"-"` +} + +func (o *object) isNestedBlock() bool { + switch o.Type { + case objectTypeObject: + return len(o.Properties) > 0 + case objectTypeArray: + switch o.ArrayItems.Type { + case objectTypeObject, objectTypeArray: + return true + } + } + return false +} + +func (o *object) init(name string) { + o.jsonName = name + o.tfName = strings.ReplaceAll(name, ".", "__") + o.camelName = toCamelCase(name) + + low := toLowerFirst(o.camelName) + o.varName = low + "Var" + o.attrsName = low + "Attrs" + o.tfoStructName = "tfo" + o.camelName + o.dtoStructName = "dto" + o.camelName + + // Sorts properties, so they keep order on each generation + keys := make([]string, 0, len(o.Properties)) + for k := range o.Properties { + keys = append(keys, k) + } + slices.Sort(keys) + for _, k := range keys { + o.properties = append(o.properties, o.Properties[k]) + } + + required := make(map[string]bool, len(o.RequiredFields)) + for _, k := range o.RequiredFields { + required[k] = true + } + + for _, k := range keys { + child := o.Properties[k] + child.parent = o + child.Required = required[k] + child.init(k) + } + + // Types can be list of strings, or a string + if v, ok := o.OrigType.(string); ok { + o.Type = objectType(v) + } else if v, ok := o.OrigType.([]interface{}); ok { + o.Type = objectType(v[0].(string)) + for _, t := range v { + switch s := t.(string); s { + case "null": + o.Nullable = true + default: + o.Type = objectType(s) + } + } + } + + if o.Type == objectTypeArray { + o.ArrayItems.parent = o + o.ArrayItems.init(name) + } + + // In terraform objects are lists of one item + // Root item and properties should have max constraint + if o.Type == 
objectTypeObject { + if o.isRoot || o.parent != nil && o.parent.Type == objectTypeObject { + o.MaxItems = toPtr(1) + } + } +} + +// toCamelCase some fields has dots within, makes cleaner camelCase +func toCamelCase(s string) string { + return strcase.UpperCamelCase(strings.ReplaceAll(s, ".", "_")) +} + +func toLowerFirst(s string) string { + return strings.ToLower(s[0:1]) + s[1:] +} diff --git a/ucgenerator/tests.go b/ucgenerator/tests.go new file mode 100644 index 000000000..fe63ec908 --- /dev/null +++ b/ucgenerator/tests.go @@ -0,0 +1,151 @@ +package main + +import ( + "encoding/json" + "fmt" + "strings" +) + +// genJSONSample generates sample JSON for a test +// If not allFields provided, creates a smaller json, which helps to test nil values (missing) +func genJSONSample(b *strings.Builder, o *object, allFields bool) string { + switch o.Type { + case objectTypeObject: + b.WriteString("{") + for i, p := range o.properties { + // Either field required or all fields printed + if !(p.Required || allFields || !p.CreateOnly) { + continue + } + + b.WriteString(fmt.Sprintf("%q:", p.jsonName)) + genJSONSample(b, p, allFields) + if i+1 != len(o.properties) { + b.WriteString(",") + } + } + b.WriteString("}") + case objectTypeArray: + b.WriteString("[") + genJSONSample(b, o.ArrayItems, allFields) + b.WriteString("]") + case objectTypeString: + b.WriteString(`"foo"`) + case objectTypeBoolean: + b.WriteString("true") + case objectTypeInteger: + b.WriteString("1") + case objectTypeNumber: + b.WriteString("1") + } + return b.String() +} + +func genTestFile(pkg string, o *object) (string, error) { + allFields, err := indentJSON(genJSONSample(new(strings.Builder), o, true)) + if err != nil { + return "", err + } + + updateOnlyFields, err := indentJSON(genJSONSample(new(strings.Builder), o, false)) + if err != nil { + return "", err + } + + file := fmt.Sprintf( + testFile, + codeGenerated, + pkg, + o.camelName, + fmt.Sprintf("`%s`", allFields), + fmt.Sprintf("`%s`", updateOnlyFields), + ) + + return strings.TrimSpace(file), nil +} + +func indentJSON(s string) (string, error) { + s = strings.ReplaceAll(s, ",}", "}") // fixes trailing comma when not all fields are generated + m := make(map[string]any) + err := json.Unmarshal([]byte(s), &m) + if err != nil { + return "", err + } + + b, err := json.MarshalIndent(m, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +const testFile = ` +// %[1]s + +package %[2]s + +import ( + "context" + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/require" + + "github.com/aiven/terraform-provider-aiven/internal/schemautil" +) + +const allFields = %[4]s +const updateOnlyFields = %[5]s + +func Test%[3]s(t *testing.T) { + cases := []struct{ + name string + source string + expect string + marshal func (any) (map[string]any, error) + }{ + { + name: "fields to create resource", + source: allFields, + expect: allFields, + marshal: schemautil.MarshalCreateUserConfig, + }, + { + name: "only fields to update resource", + source: allFields, + expect: updateOnlyFields, // usually, fewer fields + marshal: schemautil.MarshalUpdateUserConfig, + }, + } + + ctx := context.Background() + diags := new(diag.Diagnostics) + for _, opt := range cases { + t.Run(opt.name, func(t *testing.T) { + dto := new(dto%[3]s) + err := json.Unmarshal([]byte(opt.source), dto) + require.NoError(t, err) + + // From json to TF + tfo := flatten%[3]s(ctx, diags, dto) + require.Empty(t, 
diags) + + // From TF to json + config := expand%[3]s(ctx, diags, tfo) + require.Empty(t, diags) + + // Run specific marshal (create or update resource) + dtoConfig, err := opt.marshal(config) + require.NoError(t, err) + + // Compares that output is strictly equal to the input + // If so, the flow is valid + b, err := json.MarshalIndent(dtoConfig, "", " ") + require.NoError(t, err) + require.Empty(t, cmp.Diff(opt.expect, string(b))) + }) + } +} +`
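The generated package exposes only two public entry points, Expand and Flatten. For illustration, a minimal sketch of how they are meant to be driven; the function below is hypothetical and must live inside this module (it imports internal packages), and the apiResponse map and the choice of MarshalUpdateUserConfig are assumptions rather than code taken from this patch:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
	"github.com/aiven/terraform-provider-aiven/internal/schemautil"
)

// roundTrip sketches the intended flow: an API user_config map is flattened into a
// types.List for Terraform state, and a plan value is expanded back into a DTO that
// is then filtered down to the fields allowed in an update request.
func roundTrip(ctx context.Context, apiResponse map[string]any, plan types.List) (types.List, map[string]any, diag.Diagnostics) {
	var diags diag.Diagnostics

	// API response -> Terraform value (a list holding at most one object).
	stateList := metrics.Flatten(ctx, &diags, apiResponse)

	// Terraform plan -> request body; MarshalUpdateUserConfig drops create-only fields.
	dto := metrics.Expand(ctx, &diags, plan)
	payload, err := schemautil.MarshalUpdateUserConfig(dto)
	if err != nil {
		diags.AddError("failed to marshal user config", err.Error())
	}
	return stateList, payload, diags
}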
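The create/update split rests on sheriff group tags: MarshalCreateUserConfig marshals the "create" and "update" groups, while MarshalUpdateUserConfig marshals only "update", so properties that genDTOObject tags with groups:"create" (create-only fields) drop out of update payloads. A standalone sketch of that behaviour, using a hypothetical two-field DTO; the expected output is an assumption about sheriff's json-tag naming, not taken from this patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/liip/sheriff"
)

// dtoExample is a hypothetical DTO mirroring the tags emitted by genDTOObject:
// create-only properties get groups:"create" only, everything else gets both groups.
type dtoExample struct {
	Database  *string `groups:"create,update" json:"database,omitempty"`
	AdminPass *string `groups:"create" json:"admin_pass,omitempty"` // create_only in the spec
}

func main() {
	db, pass := "metrics", "secret"
	dto := &dtoExample{Database: &db, AdminPass: &pass}

	for _, groups := range [][]string{{"create", "update"}, {"update"}} {
		m, err := sheriff.Marshal(&sheriff.Options{Groups: groups}, dto)
		if err != nil {
			panic(err)
		}
		b, _ := json.Marshal(m)
		fmt.Printf("%v: %s\n", groups, b)
	}
	// Expected output:
	//   [create update]: {"admin_pass":"secret","database":"metrics"}
	//   [update]: {"database":"metrics"}
}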
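The GeneralizeSchema change merges the shared timeouts block into an existing Blocks map instead of replacing it, which matters once a resource schema already carries generated user config blocks. A sketch, assuming the resource schema package (which the Create/Update/Delete timeout options imply) and using the generated metrics block as the example:

package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/resource/schema"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/service/userconfig/integration/metrics"
	"github.com/aiven/terraform-provider-aiven/internal/plugin/util"
)

// newSchema keeps the generated user config block and still gets the shared
// "timeouts" block, because GeneralizeSchema now merges instead of overwriting.
func newSchema(ctx context.Context) schema.Schema {
	s := schema.Schema{
		Blocks: map[string]schema.Block{
			"metrics_user_config": metrics.NewResourceSchema(),
		},
	}
	return util.GeneralizeSchema(ctx, s) // Blocks now also contains "timeouts"
}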
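WaitActive takes a plain retry-go retryable func and bounds the whole wait by the context, retrying up to 10 times with a 2 second base delay. A self-contained sketch of a caller inside this module; the polling condition is made up, where a real caller would poll the Aiven API for the resource state:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aiven/terraform-provider-aiven/internal/plugin/util"
)

func main() {
	// The overall deadline comes from the context, as the WaitActive comment notes.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	start := time.Now()
	// The retryable func is a plain func() error; here it "becomes active" after 5 seconds.
	err := util.WaitActive(ctx, func() error {
		if time.Since(start) < 5*time.Second {
			return errors.New("integration is not active yet")
		}
		return nil
	})
	fmt.Println("wait finished:", err)
}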