diff --git a/CHANGELOG.md b/CHANGELOG.md
index f46707a5a..0b5c89df7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ nav_order: 1
+- Use the new user config generator to generate service integration configs
- Fix `aiven_kafka_schema` version update
## [4.14.0] - 2024-02-20
diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md
index 7e9323920..ca6150a6f 100644
--- a/docs/resources/service_integration.md
+++ b/docs/resources/service_integration.md
@@ -58,21 +58,22 @@ resource "aiven_service_integration" "my_integration_metrics" {
Optional:
-- `tables` (Block List, Max: 100) Tables to create. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables))
+- `tables` (Block List, Max: 100) Tables to create (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables))
### Nested Schema for `clickhouse_kafka_user_config.tables`
Required:
+- `columns` (Block List, Min: 1, Max: 100) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
- `data_format` (String) Message data format. The default value is `JSONEachRow`.
- `group_name` (String) Kafka consumers group. The default value is `clickhouse`.
- `name` (String) Name of the table.
+- `topics` (Block List, Min: 1, Max: 100) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
Optional:
- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
-- `columns` (Block List, Max: 100) Table columns. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`.
- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`.
- `max_block_size` (Number) Number of rows collected by poll(s) for flushing data from Kafka. The default value is `0`.
@@ -80,7 +81,6 @@ Optional:
- `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`.
- `poll_max_batch_size` (Number) Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.
- `skip_broken_messages` (Number) Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.
-- `topics` (Block List, Max: 100) Kafka topics. (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
### Nested Schema for `clickhouse_kafka_user_config.tables.columns`
@@ -106,7 +106,7 @@ Required:
Optional:
-- `databases` (Block List, Max: 10) Databases to expose. (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases))
+- `databases` (Block List, Max: 10) Databases to expose (see [below for nested schema](#nestedblock--clickhouse_postgresql_user_config--databases))
### Nested Schema for `clickhouse_postgresql_user_config.databases`
@@ -124,15 +124,15 @@ Optional:
Optional:
- `datadog_dbm_enabled` (Boolean) Enable Datadog Database Monitoring.
-- `datadog_tags` (Block List, Max: 32) Custom tags provided by user. (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags))
+- `datadog_tags` (Block List, Max: 32) Custom tags provided by user (see [below for nested schema](#nestedblock--datadog_user_config--datadog_tags))
- `exclude_consumer_groups` (List of String) List of consumer groups to exclude.
- `exclude_topics` (List of String) List of topics to exclude.
- `include_consumer_groups` (List of String) List of consumer groups to include.
- `include_topics` (List of String) List of topics to include.
- `kafka_custom_metrics` (List of String) List of custom metrics.
- `max_jmx_metrics` (Number) Maximum number of JMX metrics to send.
-- `opensearch` (Block List, Max: 1) Datadog Opensearch Options. (see [below for nested schema](#nestedblock--datadog_user_config--opensearch))
-- `redis` (Block List, Max: 1) Datadog Redis Options. (see [below for nested schema](#nestedblock--datadog_user_config--redis))
+- `opensearch` (Block List, Max: 1) Datadog Opensearch Options (see [below for nested schema](#nestedblock--datadog_user_config--opensearch))
+- `redis` (Block List, Max: 1) Datadog Redis Options (see [below for nested schema](#nestedblock--datadog_user_config--redis))
### Nested Schema for `datadog_user_config.datadog_tags`
@@ -170,8 +170,8 @@ Optional:
Optional:
-- `dropped_metrics` (Block List, Max: 1024) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
-- `extra_metrics` (Block List, Max: 1024) Metrics to allow through to AWS CloudWatch (in addition to default metrics). (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics))
+- `dropped_metrics` (Block List, Max: 1024) Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--dropped_metrics))
+- `extra_metrics` (Block List, Max: 1024) Metrics to allow through to AWS CloudWatch (in addition to default metrics) (see [below for nested schema](#nestedblock--external_aws_cloudwatch_metrics_user_config--extra_metrics))
### Nested Schema for `external_aws_cloudwatch_metrics_user_config.dropped_metrics`
@@ -197,7 +197,7 @@ Required:
Optional:
-- `kafka_connect` (Block List, Max: 1) Kafka Connect service configuration values. (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
+- `kafka_connect` (Block List, Max: 1) Kafka Connect service configuration values (see [below for nested schema](#nestedblock--kafka_connect_user_config--kafka_connect))
### Nested Schema for `kafka_connect_user_config.kafka_connect`
@@ -229,7 +229,7 @@ Optional:
Optional:
- `cluster_alias` (String) The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.
-- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values. (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
+- `kafka_mirrormaker` (Block List, Max: 1) Kafka MirrorMaker configuration values (see [below for nested schema](#nestedblock--kafka_mirrormaker_user_config--kafka_mirrormaker))
### Nested Schema for `kafka_mirrormaker_user_config.kafka_mirrormaker`
@@ -263,7 +263,7 @@ Optional:
- `database` (String) Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
- `retention_days` (Number) Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.
- `ro_username` (String) Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
-- `source_mysql` (Block List, Max: 1) Configuration options for metrics where source service is MySQL. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql))
+- `source_mysql` (Block List, Max: 1) Configuration options for metrics where source service is MySQL (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql))
- `username` (String) Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.
@@ -271,7 +271,7 @@ Optional:
Optional:
-- `telegraf` (Block List, Max: 1) Configuration options for Telegraf MySQL input plugin. (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf))
+- `telegraf` (Block List, Max: 1) Configuration options for Telegraf MySQL input plugin (see [below for nested schema](#nestedblock--metrics_user_config--source_mysql--telegraf))
### Nested Schema for `metrics_user_config.source_mysql.telegraf`
diff --git a/internal/sdkprovider/service/serviceintegration/service_integration.go b/internal/sdkprovider/service/serviceintegration/service_integration.go
index fed04ffb0..ba3e6e04a 100644
--- a/internal/sdkprovider/service/serviceintegration/service_integration.go
+++ b/internal/sdkprovider/service/serviceintegration/service_integration.go
@@ -12,104 +12,115 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
"github.com/aiven/terraform-provider-aiven/internal/common"
"github.com/aiven/terraform-provider-aiven/internal/schemautil"
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig"
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/apiconvert"
- "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/dist"
"github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig/stateupgrader"
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/converters"
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/integration"
)
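+// serviceIntegrationEndpointRegExp matches endpoint IDs of the form "project_name/endpoint_id".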
const serviceIntegrationEndpointRegExp = "^[a-zA-Z0-9_-]*\\/{1}[a-zA-Z0-9_-]*$"
-var integrationTypes = []string{
- "alertmanager",
- "cassandra_cross_service_cluster",
- "clickhouse_kafka",
- "clickhouse_postgresql",
- "dashboard",
- "datadog",
- "datasource",
- "external_aws_cloudwatch_logs",
- "external_aws_cloudwatch_metrics",
- "external_elasticsearch_logs",
- "external_google_cloud_logging",
- "external_opensearch_logs",
- "flink",
- "internal_connectivity",
- "jolokia",
- "kafka_connect",
- "kafka_logs",
- "kafka_mirrormaker",
- "logs",
- "m3aggregator",
- "m3coordinator",
- "metrics",
- "opensearch_cross_cluster_replication",
- "opensearch_cross_cluster_search",
- "prometheus",
- "read_replica",
- "rsyslog",
- "schema_registry_proxy",
+// typesList returns a map with the integration type name as the key and whether it has a user config as the value
+func typesList() map[string]bool {
+ return map[string]bool{
+ "alertmanager": false,
+ "cassandra_cross_service_cluster": false,
+ "clickhouse_kafka": true,
+ "clickhouse_postgresql": true,
+ "dashboard": false,
+ "datadog": true,
+ "datasource": false,
+ "external_aws_cloudwatch_logs": false,
+ "external_aws_cloudwatch_metrics": true,
+ "external_elasticsearch_logs": false,
+ "external_google_cloud_logging": false,
+ "external_opensearch_logs": false,
+ "flink": false,
+ "internal_connectivity": false,
+ "jolokia": false,
+ "kafka_connect": true,
+ "kafka_logs": true,
+ "kafka_mirrormaker": true,
+ "logs": true,
+ "m3aggregator": false,
+ "m3coordinator": false,
+ "metrics": true,
+ "opensearch_cross_cluster_replication": false,
+ "opensearch_cross_cluster_search": false,
+ "prometheus": false,
+ "read_replica": false,
+ "rsyslog": false,
+ "schema_registry_proxy": false,
+ }
}
-var aivenServiceIntegrationSchema = map[string]*schema.Schema{
- "integration_id": {
- Description: "Service Integration Id at aiven",
- Computed: true,
- Type: schema.TypeString,
- },
- "destination_endpoint_id": {
- Description: "Destination endpoint for the integration (if any)",
- ForceNew: true,
- Optional: true,
- Type: schema.TypeString,
- ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
- "endpoint id should have the following format: project_name/endpoint_id"),
- },
- "destination_service_name": {
- Description: "Destination service for the integration (if any)",
- ForceNew: true,
- Optional: true,
- Type: schema.TypeString,
- },
- "integration_type": {
- Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(integrationTypes, ", ", "`"),
- ForceNew: true,
- Required: true,
- Type: schema.TypeString,
- ValidateFunc: validation.StringInSlice(integrationTypes, false),
- },
- "project": {
- Description: "Project the integration belongs to",
- ForceNew: true,
- Required: true,
- Type: schema.TypeString,
- },
- "source_endpoint_id": {
- Description: "Source endpoint for the integration (if any)",
- ForceNew: true,
- Optional: true,
- Type: schema.TypeString,
- ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
- "endpoint id should have the following format: project_name/endpoint_id"),
- },
- "source_service_name": {
- Description: "Source service for the integration (if any)",
- ForceNew: true,
- Optional: true,
- Type: schema.TypeString,
- },
- "logs_user_config": dist.IntegrationTypeLogs(),
- "kafka_mirrormaker_user_config": dist.IntegrationTypeKafkaMirrormaker(),
- "kafka_connect_user_config": dist.IntegrationTypeKafkaConnect(),
- "kafka_logs_user_config": dist.IntegrationTypeKafkaLogs(),
- "metrics_user_config": dist.IntegrationTypeMetrics(),
- "datadog_user_config": dist.IntegrationTypeDatadog(),
- "clickhouse_kafka_user_config": dist.IntegrationTypeClickhouseKafka(),
- "clickhouse_postgresql_user_config": dist.IntegrationTypeClickhousePostgresql(),
- "external_aws_cloudwatch_metrics_user_config": dist.IntegrationTypeExternalAwsCloudwatchMetrics(),
+func aivenServiceIntegrationSchema() map[string]*schema.Schema {
+ types := typesList()
+ sortedTypes := maps.Keys(types)
+ slices.Sort(sortedTypes)
+
+ s := map[string]*schema.Schema{
+ "integration_id": {
+ Description: "Service Integration Id at aiven",
+ Computed: true,
+ Type: schema.TypeString,
+ },
+ "destination_endpoint_id": {
+ Description: "Destination endpoint for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
+ "endpoint id should have the following format: project_name/endpoint_id"),
+ },
+ "destination_service_name": {
+ Description: "Destination service for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "integration_type": {
+ Description: "Type of the service integration. Possible values: " + schemautil.JoinQuoted(sortedTypes, ", ", "`"),
+ ForceNew: true,
+ Required: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice(sortedTypes, false),
+ },
+ "project": {
+ Description: "Project the integration belongs to",
+ ForceNew: true,
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "source_endpoint_id": {
+ Description: "Source endpoint for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(serviceIntegrationEndpointRegExp),
+ "endpoint id should have the following format: project_name/endpoint_id"),
+ },
+ "source_service_name": {
+ Description: "Source service for the integration (if any)",
+ ForceNew: true,
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ }
+
+ // Add a "<type>_user_config" schema for each integration type that has one
+ for _, k := range sortedTypes {
+ if types[k] {
+ s[k+"_user_config"] = integration.GetUserConfig(k)
+ }
+ }
+ return s
}
func ResourceServiceIntegration() *schema.Resource {
@@ -124,7 +135,7 @@ func ResourceServiceIntegration() *schema.Resource {
},
Timeouts: schemautil.DefaultResourceTimeouts(),
- Schema: aivenServiceIntegrationSchema,
+ Schema: aivenServiceIntegrationSchema(),
SchemaVersion: 1,
StateUpgraders: stateupgrader.ServiceIntegration(),
}
@@ -160,27 +171,27 @@ func resourceServiceIntegrationCreate(ctx context.Context, d *schema.ResourceDat
}
}
- uc, err := resourceServiceIntegrationUserConfigFromSchemaToAPI(d)
- if err != nil {
- return diag.FromErr(err)
+ req := aiven.CreateServiceIntegrationRequest{
+ DestinationEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "destination_endpoint_id")),
+ DestinationService: schemautil.OptionalStringPointer(d, "destination_service_name"),
+ IntegrationType: integrationType,
+ SourceEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "source_endpoint_id")),
+ SourceService: schemautil.OptionalStringPointer(d, "source_service_name"),
}
- integration, err := client.ServiceIntegrations.Create(
- ctx,
- projectName,
- aiven.CreateServiceIntegrationRequest{
- DestinationEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "destination_endpoint_id")),
- DestinationService: schemautil.OptionalStringPointer(d, "destination_service_name"),
- IntegrationType: integrationType,
- SourceEndpointID: plainEndpointID(schemautil.OptionalStringPointer(d, "source_endpoint_id")),
- SourceService: schemautil.OptionalStringPointer(d, "source_service_name"),
- UserConfig: uc,
- },
- )
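+ // Expand the user config into the request only for integration types that have one.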
+ if typesList()[integrationType] {
+ uc, err := converters.Expand(integrationType, integration.GetUserConfig(integrationType), d)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+ req.UserConfig = uc
+ }
+
+ res, err := client.ServiceIntegrations.Create(ctx, projectName, req)
if err != nil {
- return diag.Errorf("error creating serivce integration: %s", err)
+ return diag.Errorf("error creating service integration: %s", err)
}
- d.SetId(schemautil.BuildResourceID(projectName, integration.ServiceIntegrationID))
+ d.SetId(schemautil.BuildResourceID(projectName, res.ServiceIntegrationID))
if err = resourceServiceIntegrationWaitUntilActive(ctx, d, m); err != nil {
return diag.Errorf("unable to wait for service integration to become active: %s", err)
@@ -196,7 +207,7 @@ func resourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData,
return diag.FromErr(err)
}
- integration, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID)
+ res, err := client.ServiceIntegrations.Get(ctx, projectName, integrationID)
if err != nil {
err = schemautil.ResourceReadHandleNotFound(err, d)
if err != nil {
@@ -205,7 +216,7 @@ func resourceServiceIntegrationRead(ctx context.Context, d *schema.ResourceData,
return nil
}
- if err = resourceServiceIntegrationCopyAPIResponseToTerraform(d, integration, projectName); err != nil {
+ if err = resourceServiceIntegrationCopyAPIResponseToTerraform(d, res, projectName); err != nil {
return diag.Errorf("cannot copy api response into terraform schema: %s", err)
}
@@ -350,48 +361,50 @@ func resourceServiceIntegrationUserConfigFromSchemaToAPI(d *schema.ResourceData)
func resourceServiceIntegrationCopyAPIResponseToTerraform(
d *schema.ResourceData,
- integration *aiven.ServiceIntegration,
+ res *aiven.ServiceIntegration,
project string,
) error {
if err := d.Set("project", project); err != nil {
return err
}
- if integration.DestinationEndpointID != nil {
- if err := d.Set("destination_endpoint_id", schemautil.BuildResourceID(project, *integration.DestinationEndpointID)); err != nil {
+ if res.DestinationEndpointID != nil {
+ if err := d.Set("destination_endpoint_id", schemautil.BuildResourceID(project, *res.DestinationEndpointID)); err != nil {
return err
}
- } else if integration.DestinationService != nil {
- if err := d.Set("destination_service_name", *integration.DestinationService); err != nil {
+ } else if res.DestinationService != nil {
+ if err := d.Set("destination_service_name", *res.DestinationService); err != nil {
return err
}
}
- if integration.SourceEndpointID != nil {
- if err := d.Set("source_endpoint_id", schemautil.BuildResourceID(project, *integration.SourceEndpointID)); err != nil {
+ if res.SourceEndpointID != nil {
+ if err := d.Set("source_endpoint_id", schemautil.BuildResourceID(project, *res.SourceEndpointID)); err != nil {
return err
}
- } else if integration.SourceService != nil {
- if err := d.Set("source_service_name", *integration.SourceService); err != nil {
+ } else if res.SourceService != nil {
+ if err := d.Set("source_service_name", *res.SourceService); err != nil {
return err
}
}
- if err := d.Set("integration_id", integration.ServiceIntegrationID); err != nil {
+ if err := d.Set("integration_id", res.ServiceIntegrationID); err != nil {
return err
}
- integrationType := integration.IntegrationType
+ integrationType := res.IntegrationType
if err := d.Set("integration_type", integrationType); err != nil {
return err
}
- userConfig, err := apiconvert.FromAPI(userconfig.IntegrationTypes, integrationType, integration.UserConfig)
- if err != nil {
- return err
- }
-
- if len(userConfig) > 0 {
- if err := d.Set(integrationType+"_user_config", userConfig); err != nil {
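+ // Only integration types with a user config schema have anything to flatten into state.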
+ if typesList()[integrationType] {
+ userConfig, err := converters.Flatten(integrationType, integration.GetUserConfig(integrationType), d, res.UserConfig)
+ if err != nil {
return err
}
+ if len(userConfig) > 0 {
+ err := d.Set(integrationType+"_user_config", userConfig)
+ if err != nil {
+ return err
+ }
+ }
}
return nil
diff --git a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go
index 014bcc8da..b219071df 100644
--- a/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go
+++ b/internal/sdkprovider/service/serviceintegration/service_integration_data_source.go
@@ -14,7 +14,7 @@ func DatasourceServiceIntegration() *schema.Resource {
return &schema.Resource{
ReadContext: datasourceServiceIntegrationRead,
Description: "The Service Integration data source provides information about the existing Aiven Service Integration.",
- Schema: schemautil.ResourceSchemaAsDatasourceSchema(aivenServiceIntegrationSchema,
+ Schema: schemautil.ResourceSchemaAsDatasourceSchema(aivenServiceIntegrationSchema(),
"project", "integration_type", "source_service_name", "destination_service_name"),
}
}
diff --git a/internal/sdkprovider/userconfig/integration/clickhouse_kafka.go b/internal/sdkprovider/userconfig/integration/clickhouse_kafka.go
new file mode 100644
index 000000000..de12d5a9b
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/clickhouse_kafka.go
@@ -0,0 +1,116 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func clickhouseKafkaUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "ClickhouseKafka user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"tables": {
+ Description: "Tables to create",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "auto_offset_reset": {
+ Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.",
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"smallest", "earliest", "beginning", "largest", "latest", "end"}, false),
+ },
+ "columns": {
+ Description: "Table columns",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "name": {
+ Description: "Column name.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "type": {
+ Description: "Column type.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 100,
+ Required: true,
+ Type: schema.TypeList,
+ },
+ "data_format": {
+ Description: "Message data format. The default value is `JSONEachRow`.",
+ Required: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"Avro", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "TSKV", "TSV", "TabSeparated", "RawBLOB", "AvroConfluent"}, false),
+ },
+ "date_time_input_format": {
+ Description: "Method to read DateTime from text input formats. The default value is `basic`.",
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"basic", "best_effort", "best_effort_us"}, false),
+ },
+ "group_name": {
+ Description: "Kafka consumers group. The default value is `clickhouse`.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "handle_error_mode": {
+ Description: "How to handle errors for Kafka engine. The default value is `default`.",
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"default", "stream"}, false),
+ },
+ "max_block_size": {
+ Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "max_rows_per_message": {
+ Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "name": {
+ Description: "Name of the table.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "num_consumers": {
+ Description: "The number of consumers per table per replica. The default value is `1`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "poll_max_batch_size": {
+ Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "skip_broken_messages": {
+ Description: "Skip at least this number of broken messages from Kafka topic per block. The default value is `0`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "topics": {
+ Description: "Kafka topics",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"name": {
+ Description: "Name of the topic.",
+ Required: true,
+ Type: schema.TypeString,
+ }}},
+ MaxItems: 100,
+ Required: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 100,
+ Optional: true,
+ Type: schema.TypeList,
+ }}},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/clickhouse_postgresql.go b/internal/sdkprovider/userconfig/integration/clickhouse_postgresql.go
new file mode 100644
index 000000000..6dd04f28b
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/clickhouse_postgresql.go
@@ -0,0 +1,37 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func clickhousePostgresqlUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "ClickhousePostgresql user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"databases": {
+ Description: "Databases to expose",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "database": {
+ Description: "PostgreSQL database to expose. The default value is `defaultdb`.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "schema": {
+ Description: "PostgreSQL schema to expose. The default value is `public`.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 10,
+ Optional: true,
+ Type: schema.TypeList,
+ }}},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/datadog.go b/internal/sdkprovider/userconfig/integration/datadog.go
new file mode 100644
index 000000000..91dc2b589
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/datadog.go
@@ -0,0 +1,133 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func datadogUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "Datadog user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "datadog_dbm_enabled": {
+ Description: "Enable Datadog Database Monitoring.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "datadog_tags": {
+ Description: "Custom tags provided by user",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "comment": {
+ Description: "Optional tag explanation.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "tag": {
+ Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 32,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "exclude_consumer_groups": {
+ Description: "List of custom metrics.",
+ Elem: &schema.Schema{
+ Description: "Consumer groups to exclude.",
+ Type: schema.TypeString,
+ },
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "exclude_topics": {
+ Description: "List of topics to exclude.",
+ Elem: &schema.Schema{
+ Description: "Topics to exclude.",
+ Type: schema.TypeString,
+ },
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "include_consumer_groups": {
+ Description: "List of custom metrics.",
+ Elem: &schema.Schema{
+ Description: "Consumer groups to include.",
+ Type: schema.TypeString,
+ },
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "include_topics": {
+ Description: "List of topics to include.",
+ Elem: &schema.Schema{
+ Description: "Topics to include.",
+ Type: schema.TypeString,
+ },
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "kafka_custom_metrics": {
+ Description: "List of custom metrics.",
+ Elem: &schema.Schema{
+ Description: "Metric name.",
+ Type: schema.TypeString,
+ },
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "max_jmx_metrics": {
+ Description: "Maximum number of JMX metrics to send.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "opensearch": {
+ Description: "Datadog Opensearch Options",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "index_stats_enabled": {
+ Description: "Enable Datadog Opensearch Index Monitoring.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "pending_task_stats_enabled": {
+ Description: "Enable Datadog Opensearch Pending Task Monitoring.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "pshard_stats_enabled": {
+ Description: "Enable Datadog Opensearch Primary Shard Monitoring.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "redis": {
+ Description: "Datadog Redis Options",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"command_stats_enabled": {
+ Description: "Enable command_stats option in the agent's configuration. The default value is `false`.",
+ Optional: true,
+ Type: schema.TypeBool,
+ }}},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/external_aws_cloudwatch_metrics.go b/internal/sdkprovider/userconfig/integration/external_aws_cloudwatch_metrics.go
new file mode 100644
index 000000000..a1bb043a0
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/external_aws_cloudwatch_metrics.go
@@ -0,0 +1,57 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func externalAwsCloudwatchMetricsUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "ExternalAwsCloudwatchMetrics user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "dropped_metrics": {
+ Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics)",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "field": {
+ Description: "Identifier of a value in the metric.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "metric": {
+ Description: "Identifier of the metric.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "extra_metrics": {
+ Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics)",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "field": {
+ Description: "Identifier of a value in the metric.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "metric": {
+ Description: "Identifier of the metric.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 1024,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/integration.go b/internal/sdkprovider/userconfig/integration/integration.go
new file mode 100644
index 000000000..6effef62f
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/integration.go
@@ -0,0 +1,30 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
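+// GetUserConfig returns the generated user config schema for the given integration type.
+// It panics on an unknown type, so callers gate on the list of types that have a config.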
+func GetUserConfig(kind string) *schema.Schema {
+ switch kind {
+ case "clickhouse_kafka":
+ return clickhouseKafkaUserConfig()
+ case "clickhouse_postgresql":
+ return clickhousePostgresqlUserConfig()
+ case "datadog":
+ return datadogUserConfig()
+ case "external_aws_cloudwatch_metrics":
+ return externalAwsCloudwatchMetricsUserConfig()
+ case "kafka_connect":
+ return kafkaConnectUserConfig()
+ case "kafka_logs":
+ return kafkaLogsUserConfig()
+ case "kafka_mirrormaker":
+ return kafkaMirrormakerUserConfig()
+ case "logs":
+ return logsUserConfig()
+ case "metrics":
+ return metricsUserConfig()
+ default:
+ panic("unknown user config type: " + kind)
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/kafka_connect.go b/internal/sdkprovider/userconfig/integration/kafka_connect.go
new file mode 100644
index 000000000..e793f8efc
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/kafka_connect.go
@@ -0,0 +1,47 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func kafkaConnectUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "KafkaConnect user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"kafka_connect": {
+ Description: "Kafka Connect service configuration values",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "config_storage_topic": {
+ Description: "The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "group_id": {
+ Description: "A unique string that identifies the Connect cluster group this worker belongs to.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "offset_storage_topic": {
+ Description: "The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "status_storage_topic": {
+ Description: "The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }}},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/kafka_logs.go b/internal/sdkprovider/userconfig/integration/kafka_logs.go
new file mode 100644
index 000000000..b536a20b7
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/kafka_logs.go
@@ -0,0 +1,38 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func kafkaLogsUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "KafkaLogs user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "kafka_topic": {
+ Description: "Topic name.",
+ Required: true,
+ Type: schema.TypeString,
+ },
+ "selected_log_fields": {
+ Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
+ Elem: &schema.Schema{
+ Description: "Log field name.",
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+ },
+ MaxItems: 5,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/integration/kafka_mirrormaker.go
new file mode 100644
index 000000000..724405c73
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/kafka_mirrormaker.go
@@ -0,0 +1,66 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func kafkaMirrormakerUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "KafkaMirrormaker user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "cluster_alias": {
+ Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "kafka_mirrormaker": {
+ Description: "Kafka MirrorMaker configuration values",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "consumer_fetch_min_bytes": {
+ Description: "The minimum amount of data the server should return for a fetch request.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "producer_batch_size": {
+ Description: "The batch size in bytes producer will attempt to collect before publishing to broker.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "producer_buffer_memory": {
+ Description: "The amount of bytes producer can use for buffering data before publishing to broker.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "producer_compression_type": {
+ Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.",
+ Optional: true,
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false),
+ },
+ "producer_linger_ms": {
+ Description: "The linger time (ms) for waiting new data to arrive for publishing.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "producer_max_request_size": {
+ Description: "The maximum request size in bytes.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/logs.go b/internal/sdkprovider/userconfig/integration/logs.go
new file mode 100644
index 000000000..dea19056f
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/logs.go
@@ -0,0 +1,43 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func logsUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "Logs user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "elasticsearch_index_days_max": {
+ Description: "Elasticsearch index retention limit. The default value is `3`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "elasticsearch_index_prefix": {
+ Description: "Elasticsearch index prefix. The default value is `logs`.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "selected_log_fields": {
+ Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
+ Elem: &schema.Schema{
+ Description: "Log field name.",
+ Type: schema.TypeString,
+ ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+ },
+ MaxItems: 5,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/internal/sdkprovider/userconfig/integration/metrics.go b/internal/sdkprovider/userconfig/integration/metrics.go
new file mode 100644
index 000000000..6e2a6b389
--- /dev/null
+++ b/internal/sdkprovider/userconfig/integration/metrics.go
@@ -0,0 +1,125 @@
+// Code generated by user config generator. DO NOT EDIT.
+
+package integration
+
+import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/aiven/terraform-provider-aiven/internal/sdkprovider/userconfig/diff"
+)
+
+func metricsUserConfig() *schema.Schema {
+ return &schema.Schema{
+ Description: "Metrics user configurable settings",
+ DiffSuppressFunc: diff.SuppressUnchanged,
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "database": {
+ Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "retention_days": {
+ Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "ro_username": {
+ Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ "source_mysql": {
+ Description: "Configuration options for metrics where source service is MySQL",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{"telegraf": {
+ Description: "Configuration options for Telegraf MySQL input plugin",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "gather_event_waits": {
+ Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_file_events_stats": {
+ Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_index_io_waits": {
+ Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_info_schema_auto_inc": {
+ Description: "Gather auto_increment columns and max values from information schema.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_innodb_metrics": {
+ Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_perf_events_statements": {
+ Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_process_list": {
+ Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_slave_status": {
+ Description: "Gather metrics from SHOW SLAVE STATUS command output.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_table_io_waits": {
+ Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_table_lock_waits": {
+ Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "gather_table_schema": {
+ Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "perf_events_statements_digest_text_limit": {
+ Description: "Truncates digest text from perf_events_statements into this many characters.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "perf_events_statements_limit": {
+ Description: "Limits metrics from perf_events_statements.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "perf_events_statements_time_limit": {
+ Description: "Only include perf_events_statements whose last seen is less than this many seconds.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }}},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ },
+ "username": {
+ Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ }
+}
diff --git a/main.go b/main.go
index df5035980..118a84a4b 100644
--- a/main.go
+++ b/main.go
@@ -13,6 +13,7 @@ import (
//go:generate go test -tags userconfig ./internal/schemautil/userconfig
//go:generate go run ./ucgenerator/... --services cassandra,clickhouse,flink,grafana,influxdb,kafka,kafka_connect,kafka_mirrormaker,m3aggregator,m3db,mysql,opensearch,pg,redis
+//go:generate go run ./ucgenerator/... --integrations logs,kafka_mirrormaker,kafka_connect,kafka_logs,metrics,datadog,clickhouse_kafka,clickhouse_postgresql,external_aws_cloudwatch_metrics
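+// Note: the --integrations list above should match the integration types marked as having a user config in typesList() (service_integration.go).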
// registryPrefix is the registry prefix for the provider.
const registryPrefix = "registry.terraform.io/"