From 157fde57ffa1b776326744a50384cf0e003f8b9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:59:18 +0100 Subject: [PATCH] build(deps): bump the main group across 1 directory with 4 updates (#1882) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Murad Biashimov --- CHANGELOG.md | 8 +++ changelog/differ.go | 68 ++++++++++++++----- changelog/differ_test.go | 26 +++---- changelog/main.go | 8 +-- changelog/types.go | 41 +++++------ docs/data-sources/account_team_project.md | 2 +- docs/data-sources/opensearch.md | 3 + docs/data-sources/project_user.md | 2 +- docs/resources/account_team_project.md | 2 +- docs/resources/cassandra.md | 2 +- docs/resources/dragonfly.md | 2 +- docs/resources/grafana.md | 8 +-- docs/resources/kafka.md | 16 ++--- docs/resources/kafka_connect.md | 6 +- docs/resources/mysql.md | 4 +- docs/resources/opensearch.md | 5 +- docs/resources/organization_group_project.md | 2 +- docs/resources/organization_permission.md | 2 +- docs/resources/pg.md | 12 ++-- docs/resources/project_user.md | 2 +- docs/resources/redis.md | 2 +- docs/resources/service_integration.md | 6 +- .../resources/service_integration_endpoint.md | 10 +-- docs/resources/valkey.md | 2 +- go.mod | 25 +++---- go.sum | 54 ++++++++------- .../service/kafkaschema/kafka_schema.go | 7 +- .../userconfig/service/cassandra.go | 1 - .../userconfig/service/dragonfly.go | 4 +- .../sdkprovider/userconfig/service/grafana.go | 16 ++--- .../sdkprovider/userconfig/service/kafka.go | 32 ++++----- .../userconfig/service/kafka_connect.go | 12 ++-- .../sdkprovider/userconfig/service/mysql.go | 8 +-- .../userconfig/service/opensearch.go | 19 +++++- internal/sdkprovider/userconfig/service/pg.go | 24 +++---- .../sdkprovider/userconfig/service/redis.go | 4 +- .../sdkprovider/userconfig/service/valkey.go | 4 +- .../serviceintegration/clickhouse_kafka.go | 8 +-- .../userconfig/serviceintegration/datadog.go | 5 +- .../external_aws_cloudwatch_logs.go | 4 +- .../external_elasticsearch_logs.go | 4 +- .../external_opensearch_logs.go | 4 +- .../serviceintegration/kafka_logs.go | 4 +- .../serviceintegration/kafka_mirrormaker.go | 4 +- .../userconfig/serviceintegration/logs.go | 4 +- .../serviceintegrationendpoint/datadog.go | 4 +- .../external_kafka.go | 4 +- .../external_postgresql.go | 4 +- .../external_schema_registry.go | 4 +- .../serviceintegrationendpoint/rsyslog.go | 4 +- ucgenerator/models.go | 8 +++ 51 files changed, 296 insertions(+), 220 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ccb9df5b..1132288aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,14 @@ nav_order: 1 ## [MAJOR.MINOR.PATCH] - YYYY-MM-DD - Add support for `autoscaler` service integration +- Add `aiven_opensearch` resource field `opensearch_user_config.azure_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Add `aiven_opensearch` datasource field `opensearch_user_config.azure_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Add `aiven_opensearch` resource field `opensearch_user_config.gcs_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Add `aiven_opensearch` datasource field `opensearch_user_config.gcs_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Add `aiven_opensearch` resource field 
`opensearch_user_config.s3_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Add `aiven_opensearch` datasource field `opensearch_user_config.s3_migration.include_aliases`: Whether to restore aliases alongside their associated indexes +- Change `aiven_cassandra` resource field `cassandra_user_config.additional_backup_regions`: remove deprecation +- Change `aiven_cassandra` datasource field `cassandra_user_config.additional_backup_regions`: remove deprecation ## [4.28.0] - 2024-10-21 diff --git a/changelog/differ.go b/changelog/differ.go index 5ecd70c8e..5567f0bfb 100644 --- a/changelog/differ.go +++ b/changelog/differ.go @@ -4,26 +4,28 @@ import ( "encoding/json" "fmt" "slices" + "sort" "strings" + "github.com/ettle/strcase" "github.com/google/go-cmp/cmp" "github.com/samber/lo" ) -func diffItems(resourceType ResourceType, was, have *Item) (*Diff, error) { +func diffItems(resourceType RootType, was, have *Item) (*Diff, error) { // Added or removed if was == nil || have == nil { - action := ChangeTypeAdd + action := AddDiffAction if have == nil { - action = ChangeTypeRemove + action = RemoveDiffAction have = was } return &Diff{ - Type: action, - ResourceType: resourceType, - Description: removeEnum(have.Description), - Item: have, + Action: action, + RootType: resourceType, + Description: removeEnum(have.Description), + Item: have, }, nil } @@ -55,7 +57,7 @@ func diffItems(resourceType ResourceType, was, have *Item) (*Diff, error) { case "deprecated": entry = "remove deprecation" if have.Deprecated != "" { - entry = fmt.Sprintf("deprecate: %s", have.Deprecated) + entry = fmt.Sprintf("deprecate: %s", strings.TrimRight(have.Deprecated, ". ")) } case "beta": entry = "marked as beta" @@ -78,16 +80,16 @@ func diffItems(resourceType ResourceType, was, have *Item) (*Diff, error) { } return &Diff{ - Type: ChangeTypeChange, - ResourceType: resourceType, - Description: strings.Join(entries, ", "), - Item: have, + Action: ChangeDiffAction, + RootType: resourceType, + Description: strings.Join(entries, ", "), + Item: have, }, nil } func diffItemMaps(was, have ItemMap) ([]string, error) { - result := make([]string, 0) - kinds := []ResourceType{ResourceKind, DataSourceKind} + result := make([]*Diff, 0) + kinds := []RootType{ResourceRootType, DataSourceRootType} for _, kind := range kinds { wasItems := was[kind] haveItems := have[kind] @@ -105,6 +107,7 @@ func diffItemMaps(was, have ItemMap) ([]string, error) { seen[k] = true // When a resource added or removed, it skips all its fields until the next resource + // Otherwise, all its fields will appear as changes if skipPrefix != "" && strings.HasPrefix(k, skipPrefix) { continue } @@ -123,11 +126,32 @@ func diffItemMaps(was, have ItemMap) ([]string, error) { } if change != nil { - result = append(result, change.String()) + result = append(result, change) } } } - return result, nil + + // Sorts changes by action, then by root type, then by root name + sort.Slice(result, func(i, j int) bool { + a, b := result[i], result[j] + if a.Action != b.Action { + return a.Action < b.Action + } + + if a.Item.Path != b.Item.Path { + return a.Item.Path < b.Item.Path + } + + // Resource comes first, then datasource + return a.RootType > b.RootType + }) + + strs := make([]string, len(result)) + for i, r := range result { + strs[i] = r.String() + } + + return strs, nil } func toMap(item *Item) (map[string]any, error) { @@ -145,7 +169,15 @@ func toMap(item *Item) (map[string]any, error) { m["enum"] = findEnums(item.Description) 
m["beta"] = hasBeta(item.Description) m["type"] = strValueType(item.Type) - m["elemType"] = strValueType(item.ElemType) - delete(m, "description") // Not needed to compare descriptions + m["elementType"] = strValueType(item.ElementType) + + // Not needed to compare descriptions + delete(m, "description") + + // Turns "maxItems" into "max items" for human readability + for k, v := range m { + delete(m, k) + m[strcase.ToCase(k, strcase.LowerCase, ' ')] = v + } return m, err } diff --git a/changelog/differ_test.go b/changelog/differ_test.go index cbc518132..7407102d8 100644 --- a/changelog/differ_test.go +++ b/changelog/differ_test.go @@ -11,13 +11,13 @@ func TestCompare(t *testing.T) { tests := []struct { name string expect string - kind ResourceType + kind RootType old, new *Item }{ { name: "change enums", - expect: "Change resource `foo` field `bar`: enum ~~`bar`, `baz`~~ -> `foo`, `baz`", - kind: ResourceKind, + expect: "Change `foo` resource field `bar`: enum ~~`bar`, `baz`~~ -> `foo`, `baz`", + kind: ResourceRootType, old: &Item{ Type: schema.TypeString, Path: "foo.bar", @@ -31,8 +31,8 @@ func TestCompare(t *testing.T) { }, { name: "add resource field", - expect: "Add resource `foo` field `bar`: Foo", - kind: ResourceKind, + expect: "Add `foo` resource field `bar`: Foo", + kind: ResourceRootType, new: &Item{ Type: schema.TypeString, Path: "foo.bar", @@ -41,8 +41,8 @@ func TestCompare(t *testing.T) { }, { name: "remove resource field", - expect: "Remove resource `foo` field `bar`: Foo", - kind: ResourceKind, + expect: "Remove `foo` resource field `bar`: Foo", + kind: ResourceRootType, old: &Item{ Type: schema.TypeString, Path: "foo.bar", @@ -51,8 +51,8 @@ func TestCompare(t *testing.T) { }, { name: "remove beta from the field", - expect: "Change resource `foo` field `bar`: no longer beta", - kind: ResourceKind, + expect: "Change `foo` resource field `bar`: no longer beta", + kind: ResourceRootType, old: &Item{ Type: schema.TypeString, Path: "foo.bar", @@ -66,8 +66,8 @@ func TestCompare(t *testing.T) { }, { name: "add beta resource", - expect: "Add resource `foo` _(beta)_: does stuff, PROVIDER_AIVEN_ENABLE_BETA", - kind: ResourceKind, + expect: "Add `foo` resource _(beta)_: does stuff, PROVIDER_AIVEN_ENABLE_BETA", + kind: ResourceRootType, new: &Item{ Type: schema.TypeString, Path: "foo", @@ -76,8 +76,8 @@ func TestCompare(t *testing.T) { }, { name: "change type", - expect: "Change resource `foo` field `bar`: type ~~`list`~~ -> `set`", - kind: ResourceKind, + expect: "Change `foo` resource field `bar`: type ~~`list`~~ -> `set`", + kind: ResourceRootType, old: &Item{ Type: schema.TypeList, Path: "foo.bar", diff --git a/changelog/main.go b/changelog/main.go index 067ad1343..233d7cfbd 100644 --- a/changelog/main.go +++ b/changelog/main.go @@ -155,9 +155,9 @@ func writeChangelog(_ string, entries []string) error { func fromProvider(p *schema.Provider) (ItemMap, error) { // Item names might clash between resources and data sources // Splits into separate maps - sourceMaps := map[ResourceType]map[string]*schema.Resource{ - ResourceKind: p.ResourcesMap, - DataSourceKind: p.DataSourcesMap, + sourceMaps := map[RootType]map[string]*schema.Resource{ + ResourceRootType: p.ResourcesMap, + DataSourceRootType: p.DataSourcesMap, } items := make(ItemMap) @@ -200,7 +200,7 @@ func walkSchema(name string, this *schema.Schema, parent *Item) []*Item { // Properties switch elem := this.Elem.(type) { case *schema.Schema: - item.ElemType = elem.Type + item.ElementType = elem.Type case *schema.Resource: for k, 
child := range elem.Schema { items = append(items, walkSchema(k, child, item)...) diff --git a/changelog/types.go b/changelog/types.go index 26f5fc472..abd2893bd 100644 --- a/changelog/types.go +++ b/changelog/types.go @@ -7,25 +7,28 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -type ( - ResourceType string - DiffType string -) +type RootType string const ( - ResourceKind ResourceType = "resource" - DataSourceKind ResourceType = "datasource" + ResourceRootType RootType = "resource" + DataSourceRootType RootType = "datasource" +) + +type DiffAction string - ChangeTypeAdd DiffType = "Add" - ChangeTypeRemove DiffType = "Remove" - ChangeTypeChange DiffType = "Change" +const ( + AddDiffAction DiffAction = "Add" + RemoveDiffAction DiffAction = "Remove" + ChangeDiffAction DiffAction = "Change" ) -type ItemMap map[ResourceType]map[string]*Item +type ItemMap map[RootType]map[string]*Item type Item struct { - Name string `json:"name"` - Path string `json:"path"` + Path string `json:"path"` // e.g. aiven_project.project + Name string `json:"name"` // e.g. project + + // Terraform schema fields Description string `json:"description"` ForceNew bool `json:"forceNew"` Optional bool `json:"optional"` @@ -33,22 +36,22 @@ type Item struct { MaxItems int `json:"maxItems"` Deprecated string `json:"deprecated"` Type schema.ValueType `json:"type"` - ElemType schema.ValueType `json:"elemType"` + ElementType schema.ValueType `json:"elementType"` } type Diff struct { - Type DiffType - ResourceType ResourceType - Description string - Item *Item + Action DiffAction + RootType RootType + Description string + Item *Item } func (c *Diff) String() string { // resource name + field name path := strings.SplitN(c.Item.Path, ".", 2) - // e.g.: "Add resource `aiven_project`" - msg := fmt.Sprintf("%s %s `%s`", c.Type, c.ResourceType, path[0]) + // e.g.: "Add `aiven_project` resource" + msg := fmt.Sprintf("%s `%s` %s", c.Action, path[0], c.RootType) // e.g.: "field `project`" if len(path) > 1 { diff --git a/docs/data-sources/account_team_project.md b/docs/data-sources/account_team_project.md index 5ff414251..4224a55c4 100644 --- a/docs/data-sources/account_team_project.md +++ b/docs/data-sources/account_team_project.md @@ -32,4 +32,4 @@ data "aiven_account_team_project" "account_team_project1" { ### Read-Only - `id` (String) The ID of this resource. -- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. 
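The renaming above (`ResourceType`→`RootType`, `DiffType`→`DiffAction`) and the reworked `Diff.String()` change the changelog wording from "Add resource `foo` field `bar`" to "Add `foo` resource field `bar`". Below is a condensed, self-contained sketch of the new formatting; the `field` suffix line is inferred from the expectations in `changelog/differ_test.go`, while the rest is lifted from `changelog/types.go` in this patch (the real `Diff` carries an `Item *Item` rather than a bare `Path`).

```go
package main

import (
	"fmt"
	"strings"
)

type (
	RootType   string
	DiffAction string
)

const (
	ResourceRootType RootType   = "resource"
	AddDiffAction    DiffAction = "Add"
)

// Diff is condensed for the sketch: Path stands in for Item.Path.
type Diff struct {
	Action      DiffAction
	RootType    RootType
	Description string
	Path        string // root name + field, e.g. "foo.bar"
}

func (c *Diff) String() string {
	path := strings.SplitN(c.Path, ".", 2)
	// e.g. "Add `foo` resource" (the old order was "Add resource `foo`")
	msg := fmt.Sprintf("%s `%s` %s", c.Action, path[0], c.RootType)
	if len(path) > 1 {
		// Field suffix, per the test expectations in differ_test.go.
		msg += fmt.Sprintf(" field `%s`", path[1])
	}
	return msg + ": " + c.Description
}

func main() {
	d := &Diff{AddDiffAction, ResourceRootType, "Foo", "foo.bar"}
	fmt.Println(d) // Add `foo` resource field `bar`: Foo
}
```

Note that `diffItemMaps` now also sorts the collected diffs — by action, then by item path, with resources ordered before datasources when paths match — so entries land in the changelog in a stable order rather than map-iteration order.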
diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md index ac40e3f0f..d5b444b3e 100644 --- a/docs/data-sources/opensearch.md +++ b/docs/data-sources/opensearch.md @@ -128,6 +128,7 @@ Read-Only: - `compress` (Boolean) - `container` (String) - `endpoint_suffix` (String) +- `include_aliases` (Boolean) - `indices` (String) - `key` (String) - `restore_global_state` (Boolean) @@ -145,6 +146,7 @@ Read-Only: - `chunk_size` (String) - `compress` (Boolean) - `credentials` (String) +- `include_aliases` (Boolean) - `indices` (String) - `restore_global_state` (Boolean) - `snapshot_name` (String) @@ -449,6 +451,7 @@ Read-Only: - `chunk_size` (String) - `compress` (Boolean) - `endpoint` (String) +- `include_aliases` (Boolean) - `indices` (String) - `region` (String) - `restore_global_state` (Boolean) diff --git a/docs/data-sources/project_user.md b/docs/data-sources/project_user.md index 889026e35..3c7511bae 100644 --- a/docs/data-sources/project_user.md +++ b/docs/data-sources/project_user.md @@ -31,4 +31,4 @@ data "aiven_project_user" "mytestuser" { - `accepted` (Boolean) Whether the user has accepted the request to join the project. Users get an invite and become project members after accepting the invite. - `id` (String) The ID of this resource. -- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. diff --git a/docs/resources/account_team_project.md b/docs/resources/account_team_project.md index c3c7577f9..6c507dff6 100644 --- a/docs/resources/account_team_project.md +++ b/docs/resources/account_team_project.md @@ -48,7 +48,7 @@ resource "aiven_account_team_project" "main" { ### Optional - `project_name` (String) The name of an already existing project -- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only diff --git a/docs/resources/cassandra.md b/docs/resources/cassandra.md index 99a58ce6d..ba99e4948 100644 --- a/docs/resources/cassandra.md +++ b/docs/resources/cassandra.md @@ -86,7 +86,7 @@ Optional: Optional: -- `additional_backup_regions` (List of String, Deprecated) Additional Cloud Regions for Backup Replication. +- `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication. 
- `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed. Example: `3`. - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed. Example: `30`. - `cassandra` (Block List, Max: 1) Cassandra configuration values (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra)) diff --git a/docs/resources/dragonfly.md b/docs/resources/dragonfly.md index 354123b19..04d301612 100644 --- a/docs/resources/dragonfly.md +++ b/docs/resources/dragonfly.md @@ -87,7 +87,7 @@ Read-Only: Optional: - `cache_mode` (Boolean) Evict entries when getting close to maxmemory limit. Default: `false`. -- `dragonfly_persistence` (String) Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. +- `dragonfly_persistence` (String) Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. - `dragonfly_ssl` (Boolean) Require SSL to access Dragonfly. Default: `true`. - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16`. - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. `10.20.0.0/16` (see [below for nested schema](#nestedblock--dragonfly_user_config--ip_filter_object)) diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md index 5d6295dc8..0a611c0e8 100644 --- a/docs/resources/grafana.md +++ b/docs/resources/grafana.md @@ -90,7 +90,7 @@ Optional: - `alerting_enabled` (Boolean) Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled. - `alerting_error_or_timeout` (String) Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules. - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations. Example: `0`. -- `alerting_nodata_or_nullvalues` (String) Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules. +- `alerting_nodata_or_nullvalues` (String) Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules. - `allow_embedding` (Boolean) Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking. - `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread)) - `auth_basic_enabled` (Boolean) Enable or disable basic authentication form, used by Grafana built-in login. 
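One pattern worth calling out in the regenerated docs above and below: every `Enum:` list has been reordered alphabetically (e.g. `off`, `rdb`, `dfs` becomes `dfs`, `off`, `rdb`), which is consistent with the doc generator (`ucgenerator/models.go`, also touched in this patch) sorting enum values before rendering so docs don't churn when upstream declaration order changes. A minimal sketch of that behavior — `renderEnum` is a hypothetical stand-in, not the generator's actual function:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// renderEnum is a hypothetical stand-in for the generator's enum
// rendering: values are sorted alphabetically before being quoted
// and joined, matching the ordering seen in the regenerated docs.
func renderEnum(values []string) string {
	sorted := slices.Clone(values)
	slices.Sort(sorted)
	quoted := make([]string, len(sorted))
	for i, v := range sorted {
		quoted[i] = "`" + v + "`"
	}
	return "Enum: " + strings.Join(quoted, ", ") + "."
}

func main() {
	fmt.Println(renderEnum([]string{"off", "rdb", "dfs"}))
	// Output: Enum: `dfs`, `off`, `rdb`.
}
```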
@@ -98,7 +98,7 @@ Optional: - `auth_github` (Block List, Max: 1) Github Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_github)) - `auth_gitlab` (Block List, Max: 1) GitLab Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab)) - `auth_google` (Block List, Max: 1) Google Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_google)) -- `cookie_samesite` (String) Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value. +- `cookie_samesite` (String) Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value. - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`. - `dashboard_previews_enabled` (Boolean) This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering. - `dashboards_min_refresh_interval` (String) Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h. Example: `5s`. @@ -126,7 +126,7 @@ Optional: - `static_ips` (Boolean) Use static public IP addresses. - `unified_alerting_enabled` (Boolean) Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details. - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false. -- `user_auto_assign_org_role` (String) Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer. +- `user_auto_assign_org_role` (String) Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer. - `viewers_can_edit` (Boolean) Users with view-only permission can edit but not save dashboards. - `wal` (Boolean) Setting to enable/disable Write-Ahead Logging. The default value is false (disabled). @@ -293,7 +293,7 @@ Optional: - `from_name` (String) Name used in outgoing emails, defaults to Grafana. - `password` (String, Sensitive) Password for SMTP authentication. Example: `ein0eemeev5eeth3Ahfu`. - `skip_verify` (Boolean) Skip verifying server certificate. Defaults to false. -- `starttls_policy` (String) Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS. +- `starttls_policy` (String) Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS. - `username` (String) Username for SMTP authentication. Example: `smtpuser`. diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md index 33b45ca5f..d0718d50f 100644 --- a/docs/resources/kafka.md +++ b/docs/resources/kafka.md @@ -159,7 +159,7 @@ Optional: Optional: - `auto_create_topics_enable` (Boolean) Enable auto-creation of topics. (Default: true). 
-- `compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer). +- `compression_type` (String) Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer). - `connections_max_idle_ms` (Number) Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`. - `default_replication_factor` (Number) Replication factor for auto-created topics (Default: 3). - `group_initial_rebalance_delay_ms` (Number) The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: `3000`. @@ -169,7 +169,7 @@ Optional: - `log_cleaner_max_compaction_lag_ms` (Number) The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)). - `log_cleaner_min_cleanable_ratio` (Number) Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: `0.5`. - `log_cleaner_min_compaction_lag_ms` (Number) The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms). -- `log_cleanup_policy` (String) Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete). +- `log_cleanup_policy` (String) Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete). - `log_flush_interval_messages` (Number) The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`. - `log_flush_interval_ms` (Number) The maximum time in ms that a message in any topic is kept in memory (page-cache) before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null). - `log_index_interval_bytes` (Number) The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: `4096`. @@ -220,10 +220,10 @@ Optional: Optional: -- `connector_client_config_override_policy` (String) Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None. 
+- `connector_client_config_override_policy` (String) Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None. - `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest. - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`. -- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. +- `consumer_isolation_level` (String) Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. - `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`. - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000). - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500). @@ -231,7 +231,7 @@ Optional: - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000). - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384). - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432). -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. 
Defaults to 0. - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`. - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes. @@ -288,10 +288,10 @@ Optional: - `consumer_enable_auto_commit` (Boolean) If true the consumer's offset will be periodically committed to Kafka in the background. Default: `true`. - `consumer_request_max_bytes` (Number) Maximum number of bytes in unencoded message keys and values by a single request. Default: `67108864`. - `consumer_request_timeout_ms` (Number) Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: `1000`. -- `name_strategy` (String) Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`. +- `name_strategy` (String) Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`. - `name_strategy_validation` (Boolean) If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`. -- `producer_acks` (String) Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`. -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_acks` (String) Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) Wait for up to the given delay to allow batching records together. Default: `0`. - `producer_max_request_size` (Number) The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: `1048576`. - `simpleconsumer_pool_size_max` (Number) Maximum number of SimpleConsumers that can be instantiated per broker. Default: `25`. diff --git a/docs/resources/kafka_connect.md b/docs/resources/kafka_connect.md index c6976029c..f5362fe52 100644 --- a/docs/resources/kafka_connect.md +++ b/docs/resources/kafka_connect.md @@ -142,10 +142,10 @@ Optional: Optional: -- `connector_client_config_override_policy` (String) Enum: `None`, `All`. 
Defines what client configurations can be overridden by the connector. Default is None. +- `connector_client_config_override_policy` (String) Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None. - `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest. - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. Example: `52428800`. -- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. +- `consumer_isolation_level` (String) Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. - `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`. - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000). - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500). @@ -153,7 +153,7 @@ Optional: - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000). - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will `linger` for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384). - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432). -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. 
- `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0. - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: `1048576`. - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes. diff --git a/docs/resources/mysql.md b/docs/resources/mysql.md index d920ea288..f81376e2c 100644 --- a/docs/resources/mysql.md +++ b/docs/resources/mysql.md @@ -185,8 +185,8 @@ Optional: - `innodb_thread_concurrency` (Number) Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit). Example: `10`. - `innodb_write_io_threads` (Number) The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service. Example: `10`. - `interactive_timeout` (Number) The number of seconds the server waits for activity on an interactive connection before closing it. Example: `3600`. -- `internal_tmp_mem_storage_engine` (String) Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables. -- `log_output` (String) Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE. +- `internal_tmp_mem_storage_engine` (String) Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables. +- `log_output` (String) Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE. - `long_query_time` (Number) The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`. - `max_allowed_packet` (Number) Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M). Example: `67108864`. - `max_heap_table_size` (Number) Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M). Example: `16777216`. diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md index 8a1da4414..9f8b6f50b 100644 --- a/docs/resources/opensearch.md +++ b/docs/resources/opensearch.md @@ -142,6 +142,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. 
- `endpoint_suffix` (String) Defines the DNS suffix for Azure Storage endpoints. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `key` (String, Sensitive) Azure account secret key. One of key or sas_token should be specified. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. @@ -162,6 +163,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. @@ -328,7 +330,7 @@ Optional: Optional: -- `mode` (String) Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only. +- `mode` (String) Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only. - `node_duress` (Block List, Max: 1) Node duress settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--node_duress)) - `search_shard_task` (Block List, Max: 1) Search shard settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--search_shard_task)) - `search_task` (Block List, Max: 1) Search task settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure--search_task)) @@ -498,6 +500,7 @@ Optional: - `chunk_size` (String) Big files can be broken down into chunks during snapshotting if needed. Should be the same as for the 3rd party repository. - `compress` (Boolean) When set to true metadata files are stored in compressed format. - `endpoint` (String) The S3 service endpoint to connect to. If you are using an S3-compatible service then you should set this to the service’s endpoint. +- `include_aliases` (Boolean) Whether to restore aliases alongside their associated indexes. Default is true. - `indices` (String) A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`. - `restore_global_state` (Boolean) If true, restore the cluster state. Defaults to false. 
- `server_side_encryption` (Boolean) When set to true files are encrypted on server side. diff --git a/docs/resources/organization_group_project.md b/docs/resources/organization_group_project.md index fb372a6e0..aecb0661c 100644 --- a/docs/resources/organization_group_project.md +++ b/docs/resources/organization_group_project.md @@ -51,7 +51,7 @@ resource "aiven_organization_group_project" "example" { - `group_id` (String) The ID of the user group. - `project` (String) The project that the users in the group are members of. -- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. +- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `services:maintenance`, `service:logs:read`, `project:services:read` and `project:audit_logs:read`. ### Optional diff --git a/docs/resources/organization_permission.md b/docs/resources/organization_permission.md index 455d5f4af..2faeefb88 100644 --- a/docs/resources/organization_permission.md +++ b/docs/resources/organization_permission.md @@ -65,7 +65,7 @@ resource "aiven_organization_permission" "developers" { Required: -- `permissions` (Set of String) List of permissions. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `permissions` (Set of String) List of permissions. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. - `principal_id` (String) ID of the user or group. - `principal_type` (String) The type of principal. The possible values are `user` and `user_group`. diff --git a/docs/resources/pg.md b/docs/resources/pg.md index c34e410e1..0ef16702a 100644 --- a/docs/resources/pg.md +++ b/docs/resources/pg.md @@ -161,7 +161,7 @@ Optional: - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created. Example: `anotherservicename`. - `shared_buffers_percentage` (Number) Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value. Example: `41.5`. - `static_ips` (Boolean) Use static public IP addresses. -- `synchronous_replication` (String) Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication. +- `synchronous_replication` (String) Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication. 
- `timescaledb` (Block List, Max: 1) System-wide settings for the timescaledb extension (see [below for nested schema](#nestedblock--pg_user_config--timescaledb)) - `variant` (String) Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default. - `work_mem` (Number) Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB). Example: `4`. @@ -220,8 +220,8 @@ Optional: - `idle_in_transaction_session_timeout` (Number) Time out sessions with open transactions after this number of milliseconds. - `jit` (Boolean) Controls system-wide use of Just-in-Time Compilation (JIT). - `log_autovacuum_min_duration` (Number) Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions. -- `log_error_verbosity` (String) Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged. -- `log_line_prefix` (String) Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats. +- `log_error_verbosity` (String) Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged. +- `log_line_prefix` (String) Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats. - `log_min_duration_statement` (Number) Log statements that take more than this number of milliseconds to run, -1 disables. - `log_temp_files` (Number) Log statements for each temporary file created larger than this number of kilobytes, -1 disables. - `max_files_per_process` (Number) PostgreSQL maximum number of files that can be open per process. @@ -242,12 +242,12 @@ Optional: - `pg_partman_bgw__dot__role` (String) Controls which role to use for pg_partman's scheduled background tasks. Example: `myrolename`. - `pg_stat_monitor__dot__pgsm_enable_query_plan` (Boolean) Enables or disables query plan monitoring. - `pg_stat_monitor__dot__pgsm_max_buckets` (Number) Sets the maximum number of buckets. Example: `10`. -- `pg_stat_statements__dot__track` (String) Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top. +- `pg_stat_statements__dot__track` (String) Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top. - `temp_file_limit` (Number) PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`. - `timezone` (String) PostgreSQL service timezone. Example: `Europe/Helsinki`. 
- `track_activity_query_size` (Number) Specifies the number of bytes reserved to track the currently executing command for each active session. Example: `1024`. - `track_commit_timestamp` (String) Enum: `off`, `on`. Record commit time of transactions. -- `track_functions` (String) Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used. +- `track_functions` (String) Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used. - `track_io_timing` (String) Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms. - `wal_sender_timeout` (Number) Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout. Example: `60000`. - `wal_writer_delay` (Number) WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance. Example: `50`. @@ -293,7 +293,7 @@ Optional: - `autodb_idle_timeout` (Number) If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). Default: `3600`. - `autodb_max_db_connections` (Number) Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited. Example: `0`. -- `autodb_pool_mode` (String) Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`. +- `autodb_pool_mode` (String) Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. Default: `transaction`. - `autodb_pool_size` (Number) If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`. - `ignore_startup_parameters` (List of String) List of parameters to ignore when given in startup packet. - `max_prepared_statements` (Number) PgBouncer tracks protocol-level named prepared statements related commands sent by the client in transaction and statement pooling modes when max_prepared_statements is set to a non-zero value. Setting it to 0 disables prepared statements. max_prepared_statements defaults to 100, and its maximum is 3000. Default: `100`. diff --git a/docs/resources/project_user.md b/docs/resources/project_user.md index 384ab018a..b24c5f67d 100644 --- a/docs/resources/project_user.md +++ b/docs/resources/project_user.md @@ -33,7 +33,7 @@ resource "aiven_project_user" "mytestuser" { ### Required - `email` (String) Email address of the user in lowercase. Changing this property forces recreation of the resource. -- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:networking:read`, `project:permissions:read`, `project:services:read`, `read_only` and `service:logs:read`. +- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `read_only`, `service:configuration:write`, `service:logs:read` and `services:maintenance`. - `project` (String) The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. 
Changing this property forces recreation of the resource. ### Optional diff --git a/docs/resources/redis.md b/docs/resources/redis.md index c2fa5105e..9da99480d 100644 --- a/docs/resources/redis.md +++ b/docs/resources/redis.md @@ -108,7 +108,7 @@ Optional: - `redis_io_threads` (Number) Set Redis IO thread count. Changing this will cause a restart of the Redis service. Example: `1`. - `redis_lfu_decay_time` (Number) LFU maxmemory-policy counter decay time in minutes. Default: `1`. - `redis_lfu_log_factor` (Number) Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`. -- `redis_maxmemory_policy` (String) Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`. +- `redis_maxmemory_policy` (String) Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`. - `redis_notify_keyspace_events` (String) Set notify-keyspace-events option. - `redis_number_of_databases` (Number) Set number of Redis databases. Changing this will cause a restart of the Redis service. Example: `16`. - `redis_persistence` (String) Enum: `off`, `rdb`. When persistence is `rdb`, Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md index 580aa2dd1..c9ac9e0d0 100644 --- a/docs/resources/service_integration.md +++ b/docs/resources/service_integration.md @@ -101,14 +101,14 @@ Optional: Required: - `columns` (Block List, Min: 1, Max: 100) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns)) -- `data_format` (String) Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`. +- `data_format` (String) Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`. - `group_name` (String) Kafka consumers group. Default: `clickhouse`. - `name` (String) Name of the table. Example: `events`. - `topics` (Block List, Min: 1, Max: 100) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics)) Optional: -- `auto_offset_reset` (String) Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`. +- `auto_offset_reset` (String) Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`. - `date_time_input_format` (String) Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`. 
- `handle_error_mode` (String) Enum: `default`, `stream`. How to handle errors for Kafka engine. Default: `default`. - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. Default: `0`. @@ -313,7 +313,7 @@ Optional: - `consumer_max_poll_records` (Number) Set consumer max.poll.records. The default is 500. Example: `500`. - `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker. Example: `1024`. - `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker. Example: `8388608`. -- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. +- `producer_compression_type` (String) Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression. - `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing. Example: `100`. - `producer_max_request_size` (Number) The maximum request size in bytes. Example: `1048576`. diff --git a/docs/resources/service_integration_endpoint.md b/docs/resources/service_integration_endpoint.md index beb2f2747..9abb89273 100644 --- a/docs/resources/service_integration_endpoint.md +++ b/docs/resources/service_integration_endpoint.md @@ -108,7 +108,7 @@ Optional: - `kafka_consumer_check_instances` (Number) Number of separate instances to fetch kafka consumer statistics with. Example: `8`. - `kafka_consumer_stats_timeout` (Number) Number of seconds that datadog will wait to get consumer statistics from brokers. Example: `60`. - `max_partition_contexts` (Number) Maximum number of partition contexts to send. Example: `32000`. -- `site` (String) Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com. +- `site` (String) Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com. ### Nested Schema for `datadog_user_config.datadog_tags` @@ -212,7 +212,7 @@ Required: Required: - `bootstrap_servers` (String) Bootstrap servers. Example: `10.0.0.1:9092,10.0.0.2:9092`. -- `security_protocol` (String) Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol. +- `security_protocol` (String) Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol. Optional: @@ -291,7 +291,7 @@ Optional: - `ssl_client_key` (String, Sensitive) Client key. Example: `-----BEGIN PRIVATE KEY----- ... -----END PRIVATE KEY-----`. -- `ssl_mode` (String) Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`. +- `ssl_mode` (String) Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. 
Default: `verify-full`. - `ssl_root_cert` (String) SSL Root Cert. Example: `-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- @@ -313,7 +313,7 @@ Optional: Required: -- `authentication` (String) Enum: `none`, `basic`. Authentication method. +- `authentication` (String) Enum: `basic`, `none`. Authentication method. - `url` (String) Schema Registry URL. Example: `https://schema-registry.kafka.company.com:28419`. Optional: @@ -345,7 +345,7 @@ Optional: Required: -- `format` (String) Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`. +- `format` (String) Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`. - `port` (Number) Rsyslog server port. Default: `514`. - `server` (String) Rsyslog server IP address or hostname. Example: `logs.example.com`. - `tls` (Boolean) Require TLS. Default: `true`. diff --git a/docs/resources/valkey.md b/docs/resources/valkey.md index b25c06088..00623f1d9 100644 --- a/docs/resources/valkey.md +++ b/docs/resources/valkey.md @@ -143,7 +143,7 @@ Optional: - `valkey_io_threads` (Number) Set Valkey IO thread count. Changing this will cause a restart of the Valkey service. Example: `1`. - `valkey_lfu_decay_time` (Number) LFU maxmemory-policy counter decay time in minutes. Default: `1`. - `valkey_lfu_log_factor` (Number) Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. Default: `10`. -- `valkey_maxmemory_policy` (String) Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`. +- `valkey_maxmemory_policy` (String) Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`. - `valkey_notify_keyspace_events` (String) Set notify-keyspace-events option. - `valkey_number_of_databases` (Number) Set number of Valkey databases. Changing this will cause a restart of the Valkey service. Example: `16`. - `valkey_persistence` (String) Enum: `off`, `rdb`. When persistence is `rdb`, Valkey does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is `off`, no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked. 
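Note: the documentation changes above only reorder each `Enum:` list alphabetically; the accepted values and defaults themselves are unchanged. As a minimal sketch of how one of these enum fields is set in practice — assuming the usual `valkey_user_config` nesting implied by docs/resources/valkey.md, and with hypothetical project, cloud, plan, and service names — a configuration might look like:

```hcl
resource "aiven_valkey" "example" {
  project      = "example-project"      # hypothetical project name
  cloud_name   = "google-europe-west1"  # hypothetical cloud/region
  plan         = "startup-4"            # hypothetical service plan
  service_name = "example-valkey"

  valkey_user_config {
    # Any value from the Enum list documented above is accepted;
    # `noeviction` is the default.
    valkey_maxmemory_policy = "allkeys-lru"
  }
}
```

Values outside the documented enum set are rejected during validation by the corresponding `validation.StringInSlice` check in internal/sdkprovider/userconfig/service/valkey.go, which this patch updates further below.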
diff --git a/go.mod b/go.mod index d68d96cad..f8f30aef2 100644 --- a/go.mod +++ b/go.mod @@ -4,19 +4,21 @@ go 1.23 require ( github.com/aiven/aiven-go-client/v2 v2.30.0 - github.com/aiven/go-client-codegen v0.45.0 + github.com/aiven/go-client-codegen v0.49.0 github.com/avast/retry-go v3.0.0+incompatible github.com/dave/jennifer v1.7.1 github.com/docker/go-units v0.5.0 + github.com/ettle/strcase v0.2.0 github.com/google/go-cmp v0.6.0 github.com/gruntwork-io/terratest v0.47.2 github.com/hamba/avro/v2 v2.27.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-framework v1.12.0 - github.com/hashicorp/terraform-plugin-go v0.24.0 - github.com/hashicorp/terraform-plugin-mux v0.16.0 + github.com/hashicorp/terraform-plugin-go v0.25.0 + github.com/hashicorp/terraform-plugin-mux v0.17.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/rs/zerolog v1.33.0 github.com/samber/lo v1.47.0 github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/testify v1.9.0 @@ -26,7 +28,7 @@ require ( ) require ( - cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/iam v1.1.6 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -41,7 +43,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/rs/zerolog v1.33.0 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect @@ -51,15 +52,15 @@ require ( go.opentelemetry.io/otel/trace v1.22.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect ) require ( cloud.google.com/go v0.112.0 // indirect cloud.google.com/go/storage v1.36.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/aiven/go-api-schemas v1.95.0 + github.com/aiven/go-api-schemas v1.97.0 github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -73,7 +74,7 @@ require ( github.com/hashicorp/go-getter v1.7.6 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.1 // indirect + github.com/hashicorp/go-plugin v1.6.2 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect @@ -111,12 +112,12 @@ require ( golang.org/x/crypto v0.28.0 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/api v0.162.0 // indirect 
google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/grpc v1.66.2 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) diff --git a/go.sum b/go.sum index 900741130..bc862ab19 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= @@ -197,10 +197,10 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/aiven/aiven-go-client/v2 v2.30.0 h1:dj1nRuO1XglnwH2IwKmqEl3SGaqKebDoxnd+SRjxQRg= github.com/aiven/aiven-go-client/v2 v2.30.0/go.mod h1:Eyxa+fNgayObmUBW94uJuEkyOe1646cEpjFzhm/NETY= -github.com/aiven/go-api-schemas v1.95.0 h1:3xGqlX1dwixNDZkUEpm4sn+qjwq8yb0usKubCx3icVA= -github.com/aiven/go-api-schemas v1.95.0/go.mod h1:ATdCq7aRp+URkFI4W4mVZaNZgoN5FRicXKH3c2fpgW4= -github.com/aiven/go-client-codegen v0.45.0 h1:5+5eCN42Qb0QegJSYDw7WCi3z1IHemFyRxzJBN2TnaQ= -github.com/aiven/go-client-codegen v0.45.0/go.mod h1:FfbH32Xb+Hx5zeKTIug1Y8SfMeB+AKNRzxgrzkts2oA= +github.com/aiven/go-api-schemas v1.97.0 h1:/ykiTRissDfcrgiDyNyOx8PvByeMRHZt2ikwqUVruNA= +github.com/aiven/go-api-schemas v1.97.0/go.mod h1:+E57naSaeLp5l8x7CPEBOMiuAbPNYyMiisPqx3NTqmM= +github.com/aiven/go-client-codegen v0.49.0 h1:KwpKVykr4xWcduQ4h7+ad7Y1rAYj9Z/Tq5iKhu7/guo= +github.com/aiven/go-client-codegen v0.49.0/go.mod h1:FfbH32Xb+Hx5zeKTIug1Y8SfMeB+AKNRzxgrzkts2oA= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= @@ -235,8 +235,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod 
h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -260,8 +260,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -411,8 +413,8 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= -github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= +github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog= +github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= @@ -442,12 +444,12 @@ github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaK github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.14.0 h1:3PCn9iyzdVOgHYOBmncpSSOxjQhCTYmc+PGvbdlqSaI= github.com/hashicorp/terraform-plugin-framework-validators v0.14.0/go.mod h1:LwDKNdzxrDY/mHBrlC6aYfE2fQ3Dk3gaJD64vNiXvo4= -github.com/hashicorp/terraform-plugin-go v0.24.0 h1:2WpHhginCdVhFIrWHxDEg6RBn3YaWzR2o6qUeIEat2U= -github.com/hashicorp/terraform-plugin-go v0.24.0/go.mod h1:tUQ53lAsOyYSckFGEefGC5C8BAaO0ENqzFd3bQeuYQg= 
+github.com/hashicorp/terraform-plugin-go v0.25.0 h1:oi13cx7xXA6QciMcpcFi/rwA974rdTxjqEhXJjbAyks= +github.com/hashicorp/terraform-plugin-go v0.25.0/go.mod h1:+SYagMYadJP86Kvn+TGeV+ofr/R3g4/If0O5sO96MVw= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I= -github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo= +github.com/hashicorp/terraform-plugin-mux v0.17.0 h1:/J3vv3Ps2ISkbLPiZOLspFcIZ0v5ycUXCEQScudGCCw= +github.com/hashicorp/terraform-plugin-mux v0.17.0/go.mod h1:yWuM9U1Jg8DryNfvCp+lH70WcYv6D8aooQxxxIzFDsE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= github.com/hashicorp/terraform-plugin-testing v1.10.0 h1:2+tmRNhvnfE4Bs8rB6v58S/VpqzGC6RCh9Y8ujdn+aw= @@ -744,8 +746,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1081,10 +1083,10 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1120,8 +1122,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1138,8 +1140,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/sdkprovider/service/kafkaschema/kafka_schema.go b/internal/sdkprovider/service/kafkaschema/kafka_schema.go index 1604f158a..c8755746d 100644 --- a/internal/sdkprovider/service/kafkaschema/kafka_schema.go +++ b/internal/sdkprovider/service/kafkaschema/kafka_schema.go @@ -6,6 +6,7 @@ import ( "fmt" "reflect" "regexp" + "strings" "time" "github.com/aiven/aiven-go-client/v2" @@ -301,7 +302,7 @@ func resourceKafkaSchemaCustomizeDiff(ctx context.Context, d *schema.ResourceDif return nil } - compatible, err := client.ServiceSchemaRegistryCompatibility( + r, err := client.ServiceSchemaRegistryCompatibility( ctx, d.Get("project").(string), d.Get("service_name").(string), @@ -317,8 +318,8 @@ func resourceKafkaSchemaCustomizeDiff(ctx context.Context, d *schema.ResourceDif return fmt.Errorf("unable to check schema validity: %w", err) } - if !compatible { - return fmt.Errorf("schema is not compatible with previous version") + if !r.IsCompatible { + return fmt.Errorf("schema is not compatible with previous version: %s", strings.Join(r.Messages, ", ")) } return 
nil diff --git a/internal/sdkprovider/userconfig/service/cassandra.go b/internal/sdkprovider/userconfig/service/cassandra.go index 235f50f61..1dfd50851 100644 --- a/internal/sdkprovider/userconfig/service/cassandra.go +++ b/internal/sdkprovider/userconfig/service/cassandra.go @@ -14,7 +14,6 @@ func cassandraUserConfig() *schema.Schema { DiffSuppressFunc: diff.SuppressUnchanged, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "additional_backup_regions": { - Deprecated: "This property is deprecated.", Description: "Additional Cloud Regions for Backup Replication.", Elem: &schema.Schema{ Description: "Target cloud. Example: `aws-eu-central-1`.", diff --git a/internal/sdkprovider/userconfig/service/dragonfly.go b/internal/sdkprovider/userconfig/service/dragonfly.go index 6a6818846..fdac1e106 100644 --- a/internal/sdkprovider/userconfig/service/dragonfly.go +++ b/internal/sdkprovider/userconfig/service/dragonfly.go @@ -20,10 +20,10 @@ func dragonflyUserConfig() *schema.Schema { Type: schema.TypeBool, }, "dragonfly_persistence": { - Description: "Enum: `off`, `rdb`, `dfs`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.", + Description: "Enum: `dfs`, `off`, `rdb`. When persistence is `rdb` or `dfs`, Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is `off`, no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"off", "rdb", "dfs"}, false), + ValidateFunc: validation.StringInSlice([]string{"dfs", "off", "rdb"}, false), }, "dragonfly_ssl": { Description: "Require SSL to access Dragonfly. Default: `true`.", diff --git a/internal/sdkprovider/userconfig/service/grafana.go b/internal/sdkprovider/userconfig/service/grafana.go index 6a25cfbd1..9fa20c7d9 100644 --- a/internal/sdkprovider/userconfig/service/grafana.go +++ b/internal/sdkprovider/userconfig/service/grafana.go @@ -41,10 +41,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "alerting_nodata_or_nullvalues": { - Description: "Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.", + Description: "Enum: `alerting`, `keep_state`, `no_data`, `ok`. Default value for 'no data or null values' for new alerting rules.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"alerting", "no_data", "keep_state", "ok"}, false), + ValidateFunc: validation.StringInSlice([]string{"alerting", "keep_state", "no_data", "ok"}, false), }, "allow_embedding": { Description: "Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.", @@ -332,10 +332,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeList, }, "cookie_samesite": { - Description: "Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 
`lax` is the default value.", + Description: "Enum: `lax`, `none`, `strict`. Cookie SameSite attribute: `strict` prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. `lax` is the default value.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"lax", "strict", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"lax", "none", "strict"}, false), }, "custom_domain": { Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: `grafana.example.org`.", @@ -599,10 +599,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "starttls_policy": { - Description: "Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", + Description: "Enum: `MandatoryStartTLS`, `NoStartTLS`, `OpportunisticStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"OpportunisticStartTLS", "MandatoryStartTLS", "NoStartTLS"}, false), + ValidateFunc: validation.StringInSlice([]string{"MandatoryStartTLS", "NoStartTLS", "OpportunisticStartTLS"}, false), }, "username": { Description: "Username for SMTP authentication. Example: `smtpuser`.", @@ -630,10 +630,10 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "user_auto_assign_org_role": { - Description: "Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.", + Description: "Enum: `Admin`, `Editor`, `Viewer`. Set role for new signups. Defaults to Viewer.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"Viewer", "Admin", "Editor"}, false), + ValidateFunc: validation.StringInSlice([]string{"Admin", "Editor", "Viewer"}, false), }, "viewers_can_edit": { Description: "Users with view-only permission can edit but not save dashboards.", diff --git a/internal/sdkprovider/userconfig/service/kafka.go b/internal/sdkprovider/userconfig/service/kafka.go index 15361f3ba..104701bbf 100644 --- a/internal/sdkprovider/userconfig/service/kafka.go +++ b/internal/sdkprovider/userconfig/service/kafka.go @@ -94,10 +94,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).", + Description: "Enum: `gzip`, `lz4`, `producer`, `snappy`, `uncompressed`, `zstd`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). 
It additionally accepts `uncompressed` which is equivalent to no compression; and `producer` which means retain the original compression codec set by the producer.(Default: producer).", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "producer", "snappy", "uncompressed", "zstd"}, false), }, "connections_max_idle_ms": { Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: `540000`.", @@ -145,10 +145,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_cleanup_policy": { - Description: "Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window (Default: delete).", + Description: "Enum: `compact`, `compact,delete`, `delete`. The default cleanup policy for segments beyond the retention window (Default: delete).", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"delete", "compact", "compact,delete"}, false), + ValidateFunc: validation.StringInSlice([]string{"compact", "compact,delete", "delete"}, false), }, "log_flush_interval_messages": { Description: "The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)). Example: `9223372036854775807`.", @@ -353,10 +353,10 @@ func kafkaUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + ValidateFunc: validation.StringInSlice([]string{"All", "None"}, false), }, "consumer_auto_offset_reset": { Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", @@ -370,10 +370,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + ValidateFunc: validation.StringInSlice([]string{"read_committed", "read_uncommitted"}, false), }, "consumer_max_partition_fetch_bytes": { Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. 
Example: `1048576`.", @@ -411,10 +411,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.", @@ -548,10 +548,10 @@ func kafkaUserConfig() *schema.Schema { ValidateFunc: validation.IntInSlice([]int{1000, 15000, 30000}), }, "name_strategy": { - Description: "Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.", + Description: "Enum: `record_name`, `topic_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. Default: `topic_name`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"topic_name", "record_name", "topic_record_name"}, false), + ValidateFunc: validation.StringInSlice([]string{"record_name", "topic_name", "topic_record_name"}, false), }, "name_strategy_validation": { Description: "If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: `true`.", @@ -559,16 +559,16 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "producer_acks": { - Description: "Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.", + Description: "Enum: `-1`, `0`, `1`, `all`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to `all` or `-1`, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: `1`.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "-1", "0", "1"}, false), + ValidateFunc: validation.StringInSlice([]string{"-1", "0", "1", "all"}, false), }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). 
It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "Wait for up to the given delay to allow batching records together. Default: `0`.", diff --git a/internal/sdkprovider/userconfig/service/kafka_connect.go b/internal/sdkprovider/userconfig/service/kafka_connect.go index dc44309c9..8fb831730 100644 --- a/internal/sdkprovider/userconfig/service/kafka_connect.go +++ b/internal/sdkprovider/userconfig/service/kafka_connect.go @@ -68,10 +68,10 @@ func kafkaConnectUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `All`, `None`. Defines what client configurations can be overridden by the connector. Default is None.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), + ValidateFunc: validation.StringInSlice([]string{"All", "None"}, false), }, "consumer_auto_offset_reset": { Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", @@ -85,10 +85,10 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_committed`, `read_uncommitted`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), + ValidateFunc: validation.StringInSlice([]string{"read_committed", "read_uncommitted"}, false), }, "consumer_max_partition_fetch_bytes": { Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: `1048576`.", @@ -126,10 +126,10 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. 
This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false), }, "producer_linger_ms": { Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will `linger` for the specified time waiting for more records to show up. Defaults to 0.", diff --git a/internal/sdkprovider/userconfig/service/mysql.go b/internal/sdkprovider/userconfig/service/mysql.go index 3122379ed..7ecfb9452 100644 --- a/internal/sdkprovider/userconfig/service/mysql.go +++ b/internal/sdkprovider/userconfig/service/mysql.go @@ -235,16 +235,16 @@ func mysqlUserConfig() *schema.Schema { Type: schema.TypeInt, }, "internal_tmp_mem_storage_engine": { - Description: "Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.", + Description: "Enum: `MEMORY`, `TempTable`. The storage engine for in-memory internal temporary tables.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"TempTable", "MEMORY"}, false), + ValidateFunc: validation.StringInSlice([]string{"MEMORY", "TempTable"}, false), }, "log_output": { - Description: "Enum: `INSIGHTS`, `NONE`, `TABLE`, `INSIGHTS,TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", + Description: "Enum: `INSIGHTS`, `INSIGHTS,TABLE`, `NONE`, `TABLE`. The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"INSIGHTS", "NONE", "TABLE", "INSIGHTS,TABLE"}, false), + ValidateFunc: validation.StringInSlice([]string{"INSIGHTS", "INSIGHTS,TABLE", "NONE", "TABLE"}, false), }, "long_query_time": { Description: "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Example: `10`.", diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go index 2980431a8..cd28c10d7 100644 --- a/internal/sdkprovider/userconfig/service/opensearch.go +++ b/internal/sdkprovider/userconfig/service/opensearch.go @@ -57,6 +57,11 @@ func opensearchUserConfig() *schema.Schema { Optional: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. 
If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true, @@ -128,6 +133,11 @@ func opensearchUserConfig() *schema.Schema { Sensitive: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true, @@ -605,10 +615,10 @@ func opensearchUserConfig() *schema.Schema { Description: "Search Backpressure Settings", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "mode": { - Description: "Enum: `monitor_only`, `enforced`, `disabled`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.", + Description: "Enum: `disabled`, `enforced`, `monitor_only`. The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"monitor_only", "enforced", "disabled"}, false), + ValidateFunc: validation.StringInSlice([]string{"disabled", "enforced", "monitor_only"}, false), }, "node_duress": { Description: "Node duress settings", @@ -1019,6 +1029,11 @@ func opensearchUserConfig() *schema.Schema { Optional: true, Type: schema.TypeString, }, + "include_aliases": { + Description: "Whether to restore aliases alongside their associated indexes. Default is true.", + Optional: true, + Type: schema.TypeBool, + }, "indices": { Description: "A comma-delimited list of indices to restore from the snapshot. Multi-index syntax is supported. By default, a restore operation includes all data streams and indices in the snapshot. If this argument is provided, the restore operation only includes the data streams and indices that you specify. Example: `metrics*,logs*,data-20240823`.", Optional: true, diff --git a/internal/sdkprovider/userconfig/service/pg.go b/internal/sdkprovider/userconfig/service/pg.go index a1fc426b2..2582738ee 100644 --- a/internal/sdkprovider/userconfig/service/pg.go +++ b/internal/sdkprovider/userconfig/service/pg.go @@ -242,16 +242,16 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_error_verbosity": { - Description: "Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.", + Description: "Enum: `DEFAULT`, `TERSE`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"TERSE", "DEFAULT", "VERBOSE"}, false), + ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "TERSE", "VERBOSE"}, false), }, "log_line_prefix": { - Description: "Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. 
Choose from one of the available log formats.", + Description: "Enum: `'%m [%p] %q[user=%u,db=%d,app=%a] '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '`. Choose from one of the available log formats.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'%m [%p] %q[user=%u,db=%d,app=%a] '", "'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '"}, false), + ValidateFunc: validation.StringInSlice([]string{"'%m [%p] %q[user=%u,db=%d,app=%a] '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '"}, false), }, "log_min_duration_statement": { Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", @@ -354,10 +354,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "pg_stat_statements__dot__track": { - Description: "Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + Description: "Enum: `all`, `none`, `top`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "top", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"all", "none", "top"}, false), }, "temp_file_limit": { Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited. Example: `5000000`.", @@ -381,10 +381,10 @@ func pgUserConfig() *schema.Schema { ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), }, "track_functions": { - Description: "Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.", + Description: "Enum: `all`, `none`, `pl`. Enables tracking of function call counts and time used.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"all", "pl", "none"}, false), + ValidateFunc: validation.StringInSlice([]string{"all", "none", "pl"}, false), }, "track_io_timing": { Description: "Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", @@ -580,10 +580,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "autodb_pool_mode": { - Description: "Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. Default: `transaction`.", + Description: "Enum: `session`, `statement`, `transaction`. PGBouncer pool mode. 
Default: `transaction`.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"session", "transaction", "statement"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"session", "statement", "transaction"}, false),
 		},
 		"autodb_pool_size": {
 			Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist. Default: `0`.",
@@ -745,10 +745,10 @@ func pgUserConfig() *schema.Schema {
 			Type: schema.TypeBool,
 		},
 		"synchronous_replication": {
-			Description:  "Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.",
+			Description:  "Enum: `off`, `quorum`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"quorum", "off"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"off", "quorum"}, false),
 		},
 		"timescaledb": {
 			Description: "System-wide settings for the timescaledb extension",
diff --git a/internal/sdkprovider/userconfig/service/redis.go b/internal/sdkprovider/userconfig/service/redis.go
index 5f9fc77d8..fdf123724 100644
--- a/internal/sdkprovider/userconfig/service/redis.go
+++ b/internal/sdkprovider/userconfig/service/redis.go
@@ -215,10 +215,10 @@ func redisUserConfig() *schema.Schema {
 			Type: schema.TypeInt,
 		},
 		"redis_maxmemory_policy": {
-			Description:  "Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. Default: `noeviction`.",
+			Description:  "Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Redis maxmemory-policy. Default: `noeviction`.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"allkeys-lfu", "allkeys-lru", "allkeys-random", "noeviction", "volatile-lfu", "volatile-lru", "volatile-random", "volatile-ttl"}, false),
 		},
 		"redis_notify_keyspace_events": {
 			Description: "Set notify-keyspace-events option.",
diff --git a/internal/sdkprovider/userconfig/service/valkey.go b/internal/sdkprovider/userconfig/service/valkey.go
index 69c2d2ce3..b27e8d115 100644
--- a/internal/sdkprovider/userconfig/service/valkey.go
+++ b/internal/sdkprovider/userconfig/service/valkey.go
@@ -231,10 +231,10 @@ func valkeyUserConfig() *schema.Schema {
 			Type: schema.TypeInt,
 		},
 		"valkey_maxmemory_policy": {
-			Description:  "Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Valkey maxmemory-policy. Default: `noeviction`.",
+			Description:  "Enum: `allkeys-lfu`, `allkeys-lru`, `allkeys-random`, `noeviction`, `volatile-lfu`, `volatile-lru`, `volatile-random`, `volatile-ttl`. Valkey maxmemory-policy. Default: `noeviction`.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"allkeys-lfu", "allkeys-lru", "allkeys-random", "noeviction", "volatile-lfu", "volatile-lru", "volatile-random", "volatile-ttl"}, false),
 		},
 		"valkey_notify_keyspace_events": {
 			Description: "Set notify-keyspace-events option.",
diff --git a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go
index 33508de24..dc16ca875 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go
@@ -17,10 +17,10 @@ func clickhouseKafkaUserConfig() *schema.Schema {
 			Description: "Tables to create",
 			Elem: &schema.Resource{Schema: map[string]*schema.Schema{
 				"auto_offset_reset": {
-					Description:  "Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.",
+					Description:  "Enum: `beginning`, `earliest`, `end`, `largest`, `latest`, `smallest`. Action to take when there is no initial offset in offset store or the desired offset is out of range. Default: `earliest`.",
 					Optional:     true,
 					Type:         schema.TypeString,
-					ValidateFunc: validation.StringInSlice([]string{"smallest", "earliest", "beginning", "largest", "latest", "end"}, false),
+					ValidateFunc: validation.StringInSlice([]string{"beginning", "earliest", "end", "largest", "latest", "smallest"}, false),
 				},
 				"columns": {
 					Description: "Table columns",
@@ -41,10 +41,10 @@ func clickhouseKafkaUserConfig() *schema.Schema {
 					Type: schema.TypeList,
 				},
 				"data_format": {
-					Description:  "Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`, `Parquet`. Message data format. Default: `JSONEachRow`.",
+					Description:  "Enum: `Avro`, `AvroConfluent`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `Parquet`, `RawBLOB`, `TSKV`, `TSV`, `TabSeparated`. Message data format. Default: `JSONEachRow`.",
 					Required:     true,
 					Type:         schema.TypeString,
-					ValidateFunc: validation.StringInSlice([]string{"Avro", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "TSKV", "TSV", "TabSeparated", "RawBLOB", "AvroConfluent", "Parquet"}, false),
+					ValidateFunc: validation.StringInSlice([]string{"Avro", "AvroConfluent", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "Parquet", "RawBLOB", "TSKV", "TSV", "TabSeparated"}, false),
 				},
 				"date_time_input_format": {
 					Description: "Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. Default: `basic`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegration/datadog.go b/internal/sdkprovider/userconfig/serviceintegration/datadog.go
index ffee5b531..06b3e0307 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/datadog.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/datadog.go
@@ -85,8 +85,9 @@ func datadogUserConfig() *schema.Schema {
 		"kafka_custom_metrics": {
 			Description: "List of custom metrics.",
 			Elem: &schema.Schema{
-				Description: "Metric name. Example: `kafka.log.log_size`.",
-				Type:        schema.TypeString,
+				Description:  "Enum: `kafka.log.log_end_offset`, `kafka.log.log_size`, `kafka.log.log_start_offset`. Metric name.",
+				Type:         schema.TypeString,
+				ValidateFunc: validation.StringInSlice([]string{"kafka.log.log_end_offset", "kafka.log.log_size", "kafka.log.log_start_offset"}, false),
 			},
 			MaxItems: 1024,
 			Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go
index d81f8d8a5..5919de83a 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go
@@ -16,9 +16,9 @@ func externalAwsCloudwatchLogsUserConfig() *schema.Schema {
 		Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": {
 			Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
 			Elem: &schema.Schema{
-				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.",
+				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.",
 				Type:         schema.TypeString,
-				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false),
 			},
 			MaxItems: 5,
 			Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go
index 61f0fa4c3..04609ac84 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go
@@ -16,9 +16,9 @@ func externalElasticsearchLogsUserConfig() *schema.Schema {
 		Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": {
 			Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
 			Elem: &schema.Schema{
-				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.",
+				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.",
 				Type:         schema.TypeString,
-				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false),
 			},
 			MaxItems: 5,
 			Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go
index 5ff9a4d65..5b67c1bd8 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go
@@ -16,9 +16,9 @@ func externalOpensearchLogsUserConfig() *schema.Schema {
 		Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": {
 			Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
 			Elem: &schema.Schema{
-				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.",
+				Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.",
 				Type:         schema.TypeString,
-				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+				ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false),
 			},
 			MaxItems: 5,
 			Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go
index 9dc5c5d25..7ed77d1b4 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go
@@ -22,9 +22,9 @@ func kafkaLogsUserConfig() *schema.Schema {
 			"selected_log_fields": {
 				Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
 				Elem: &schema.Schema{
-					Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.",
+					Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.",
 					Type:         schema.TypeString,
-					ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+					ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false),
 				},
 				MaxItems: 5,
 				Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go
index ca941c1dd..891b5b4c6 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go
@@ -49,10 +49,10 @@ func kafkaMirrormakerUserConfig() *schema.Schema {
 			Type: schema.TypeInt,
 		},
 		"producer_compression_type": {
-			Description:  "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.",
+			Description:  "Enum: `gzip`, `lz4`, `none`, `snappy`, `zstd`. Specify the default compression type for producers. This configuration accepts the standard compression codecs (`gzip`, `snappy`, `lz4`, `zstd`). It additionally accepts `none` which is the default and equivalent to no compression.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"gzip", "lz4", "none", "snappy", "zstd"}, false),
 		},
 		"producer_linger_ms": {
 			Description: "The linger time (ms) for waiting new data to arrive for publishing. Example: `100`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegration/logs.go b/internal/sdkprovider/userconfig/serviceintegration/logs.go
index 7e7b56462..e1f1fdea2 100644
--- a/internal/sdkprovider/userconfig/serviceintegration/logs.go
+++ b/internal/sdkprovider/userconfig/serviceintegration/logs.go
@@ -27,9 +27,9 @@ func logsUserConfig() *schema.Schema {
 			"selected_log_fields": {
 				Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.",
 				Elem: &schema.Schema{
-					Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.",
+					Description:  "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `SYSTEMD_UNIT`, `service_name`. Log field name.",
 					Type:         schema.TypeString,
-					ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false),
+					ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "SYSTEMD_UNIT", "service_name"}, false),
 				},
 				MaxItems: 5,
 				Optional: true,
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go
index da87deff1..f03a7d94b 100644
--- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go
+++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go
@@ -59,10 +59,10 @@ func datadogUserConfig() *schema.Schema {
 			Type: schema.TypeInt,
 		},
 		"site": {
-			Description:  "Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.",
+			Description:  "Enum: `ap1.datadoghq.com`, `datadoghq.com`, `datadoghq.eu`, `ddog-gov.com`, `us3.datadoghq.com`, `us5.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"datadoghq.com", "datadoghq.eu", "us3.datadoghq.com", "us5.datadoghq.com", "ddog-gov.com", "ap1.datadoghq.com"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"ap1.datadoghq.com", "datadoghq.com", "datadoghq.eu", "ddog-gov.com", "us3.datadoghq.com", "us5.datadoghq.com"}, false),
 		},
 	}},
 	MaxItems: 1,
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go
index 45c7a6b57..0399cdd7f 100644
--- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go
+++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go
@@ -37,10 +37,10 @@ func externalKafkaUserConfig() *schema.Schema {
 			Type: schema.TypeString,
 		},
 		"security_protocol": {
-			Description:  "Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.",
+			Description:  "Enum: `PLAINTEXT`, `SASL_PLAINTEXT`, `SASL_SSL`, `SSL`. Security protocol.",
 			Required:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"PLAINTEXT", "SASL_PLAINTEXT", "SASL_SSL", "SSL"}, false),
 		},
 		"ssl_ca_cert": {
 			Description: "PEM-encoded CA certificate. Example: `-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go
index cfb54f7fc..223e6c4ee 100644
--- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go
+++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go
@@ -47,10 +47,10 @@ func externalPostgresqlUserConfig() *schema.Schema {
 			Type: schema.TypeString,
 		},
 		"ssl_mode": {
-			Description:  "Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.",
+			Description:  "Enum: `allow`, `disable`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL mode to use for the connection. Please note that Aiven requires TLS for all connections to external PostgreSQL services. Default: `verify-full`.",
 			Optional:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"allow", "disable", "prefer", "require", "verify-ca", "verify-full"}, false),
 		},
 		"ssl_root_cert": {
 			Description: "SSL Root Cert. Example: `-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go
index 8c89d71b3..00d7e443f 100644
--- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go
+++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go
@@ -15,10 +15,10 @@ func externalSchemaRegistryUserConfig() *schema.Schema {
 		DiffSuppressFunc: diff.SuppressUnchanged,
 		Elem: &schema.Resource{Schema: map[string]*schema.Schema{
 			"authentication": {
-				Description:  "Enum: `none`, `basic`. Authentication method.",
+				Description:  "Enum: `basic`, `none`. Authentication method.",
 				Required:     true,
 				Type:         schema.TypeString,
-				ValidateFunc: validation.StringInSlice([]string{"none", "basic"}, false),
+				ValidateFunc: validation.StringInSlice([]string{"basic", "none"}, false),
 			},
 			"basic_auth_password": {
 				Description: "Basic authentication password. Example: `Zm9vYg==`.",
diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go
index 08b1929bd..485de2f7e 100644
--- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go
+++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go
@@ -25,10 +25,10 @@ func rsyslogUserConfig() *schema.Schema {
 			Type: schema.TypeString,
 		},
 		"format": {
-			Description:  "Enum: `rfc5424`, `rfc3164`, `custom`. Message format. Default: `rfc5424`.",
+			Description:  "Enum: `custom`, `rfc3164`, `rfc5424`. Message format. Default: `rfc5424`.",
 			Required:     true,
 			Type:         schema.TypeString,
-			ValidateFunc: validation.StringInSlice([]string{"rfc5424", "rfc3164", "custom"}, false),
+			ValidateFunc: validation.StringInSlice([]string{"custom", "rfc3164", "rfc5424"}, false),
 		},
 		"key": {
 			Description: "PEM encoded client key. Example: `-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n`.",
diff --git a/ucgenerator/models.go b/ucgenerator/models.go
index f7259354a..61799b96b 100644
--- a/ucgenerator/models.go
+++ b/ucgenerator/models.go
@@ -298,6 +298,14 @@ func unwrapArrayMultipleTypes(o *object) {
 			fields[key] = p
 		} else if len(p.ArrayItems.OneOf) != 0 {
+			// Usually it starts with a scalar type and then evolves to object
+			// Prioritizes scalar types
+			sort.Slice(p.ArrayItems.OneOf, func(i, j int) bool {
+				it := p.ArrayItems.OneOf[i].OrigType.(string)
+				jt := p.ArrayItems.OneOf[j].OrigType.(string)
+				return it != "object" || it < jt
+			})
+
 			// Unwraps multiple _type objects_, e.g. [{type:string}, {type: object}]
 			for i := range p.ArrayItems.OneOf {
 				t := p.ArrayItems.OneOf[i]
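A note on the recurring change above: reordering the enum values in the Description strings and ValidateFunc lists is cosmetic, because validation.StringInSlice from the terraform-plugin-sdk treats its slice as an unordered set; alphabetizing changes only the generated documentation, never which inputs pass validation. A minimal standalone sketch of that equivalence, reusing the rsyslog `format` values from this patch (the variable names are illustrative only):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// The pre- and post-patch orderings accept exactly the same values.
	before := validation.StringInSlice([]string{"rfc5424", "rfc3164", "custom"}, false)
	after := validation.StringInSlice([]string{"custom", "rfc3164", "rfc5424"}, false)

	for _, v := range []string{"custom", "bogus"} {
		_, errsBefore := before(v, "format")
		_, errsAfter := after(v, "format")
		// Prints: custom true true / bogus false false
		fmt.Println(v, len(errsBefore) == 0, len(errsAfter) == 0)
	}
}

Alphabetizing presumably also keeps regenerated output stable regardless of the ordering in the upstream user-config spec, which shrinks future diffs.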
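One caveat on the ucgenerator hunk: the committed comparator returns true for any non-object left element regardless of its counterpart, and for an `object` left element it falls back to a lexicographic compare, so for the common pair (`string`, `object`) both less(i, j) and less(j, i) evaluate to true (`"object" < "string"` lexicographically). That does not satisfy the ordering contract sort.Slice expects, and the resulting order is unspecified rather than reliably scalar-first. A contract-safe sketch of the stated intent, where oneOf is a hypothetical stand-in for the patch's p.ArrayItems.OneOf OrigType values:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical OrigType values as they might appear in a spec.
	oneOf := []string{"object", "string", "integer"}

	// Scalars first, then objects; ties broken lexicographically, so the
	// comparator is a valid strict weak ordering for sort.Slice.
	sort.Slice(oneOf, func(i, j int) bool {
		iObj, jObj := oneOf[i] == "object", oneOf[j] == "object"
		if iObj != jObj {
			return !iObj // the non-object side sorts first
		}
		return oneOf[i] < oneOf[j]
	})

	fmt.Println(oneOf) // [integer string object]
}

Grouping first and tie-breaking deterministically matters here because the generator's output is committed, so an unspecified order could churn the generated files between runs.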