diff --git a/internal/plugin/util/helpers.go b/internal/plugin/util/helpers.go index c9d08436e..0c09938ba 100644 --- a/internal/plugin/util/helpers.go +++ b/internal/plugin/util/helpers.go @@ -15,17 +15,6 @@ func Ref[T any](v T) *T { return &v } -// Deref is a helper function that dereferences any pointer type and returns the value. -func Deref[T any](p *T) T { - var result T - - if p != nil { - result = *p - } - - return result -} - // First is a helper function that returns the first argument passed in out of two. func First[T any, U any](a T, _ U) T { return a diff --git a/internal/plugin/util/pluginhelpers.go b/internal/plugin/util/pluginhelpers.go index 15d35de03..429163678 100644 --- a/internal/plugin/util/pluginhelpers.go +++ b/internal/plugin/util/pluginhelpers.go @@ -25,15 +25,6 @@ func ComposeID(parts ...string) string { return strings.Join(parts, "/") } -// BetaDescription is a helper function that returns a description for beta resources. -func BetaDescription(description string) string { - return description + ` - -**This resource is in beta and may change without notice.** To use this resource, -set the ` + "`PROVIDER_AIVEN_ENABLE_BETA`" + ` environment variable to true. -` -} - // ValueOrDefault returns the value if not nil, otherwise returns the default value. Value is converted to type // U if possible. If the conversion is not possible, the function panics. // diff --git a/internal/schemautil/mutations.go b/internal/schemautil/mutations.go deleted file mode 100644 index 3cc31d9ad..000000000 --- a/internal/schemautil/mutations.go +++ /dev/null @@ -1,115 +0,0 @@ -package schemautil - -// sensitiveFields is a list of fields that are not returned by the API on a refresh, but are supposed to remain in the -// state to make Terraform work properly. -var sensitiveFields = []string{ - "admin_username", - "admin_password", -} - -// copySensitiveFields preserves sensitive fields in the state that are not returned by the API on a refresh. -func copySensitiveFields(old, new map[string]interface{}) { - for _, k := range sensitiveFields { - if v, ok := old[k]; ok { - new[k] = v - } - } -} - -// normalizeIPFilter compares a list of IP filters set in the old user config and a sorted version coming from the new -// user config and returns the re-sorted IP filters, such that all matching entries will be in the same order as -// defined in the old user config. -func normalizeIPFilter(old, new map[string]interface{}) { - oldIPFilters, _ := old["ip_filter"].([]interface{}) - newIPFilters, _ := new["ip_filter"].([]interface{}) - fieldToWrite := "ip_filter" - - if oldIPFilters == nil || newIPFilters == nil { - var ok bool - - oldIPFilters, _ = old["ip_filter_string"].([]interface{}) - - newIPFilters, ok = new["ip_filter_string"].([]interface{}) - - fieldToWrite = "ip_filter_string" - - if !ok { - oldIPFilters, ok = old["ip_filter_object"].([]interface{}) - if !ok { - return - } - - newIPFilters, ok = new["ip_filter_object"].([]interface{}) - if !ok { - return - } - - fieldToWrite = "ip_filter_object" - } - } - - var normalizedIPFilters []interface{} - var nonexistentIPFilters []interface{} - - // First, we take all the elements from old and if they match with the elements in new, - // we preserve them in the same order as they were defined in old. - for _, o := range oldIPFilters { - for _, n := range newIPFilters { - // Define two comparison variables to avoid code duplication in the loop. 
- var comparableO interface{} - - var comparableN interface{} - - // If we're dealing with a string format, we need to compare the values directly. - if fieldToWrite == "ip_filter" || fieldToWrite == "ip_filter_string" { - comparableO = o - - comparableN = n - } else { - // If we're dealing with an object format, we need to compare the values of the "network" field. - comparableO = o.(map[string]interface{})["network"] - - comparableN = n.(map[string]interface{})["network"] - } - - if comparableO == comparableN { - normalizedIPFilters = append(normalizedIPFilters, o) - break - } - } - } - - // Second, we take the new and check whether there are any differences with the old, and - // append those to nonexistentIPFilters. - for _, n := range newIPFilters { - found := false - - for _, o := range oldIPFilters { - var comparableO interface{} - - var comparableN interface{} - - if fieldToWrite == "ip_filter" || fieldToWrite == "ip_filter_string" { - comparableO = o - - comparableN = n - } else { - comparableO = o.(map[string]interface{})["network"] - - comparableN = n.(map[string]interface{})["network"] - } - - if comparableO == comparableN { - found = true - - break - } - } - - if !found { - nonexistentIPFilters = append(nonexistentIPFilters, n) - } - } - - new[fieldToWrite] = append(normalizedIPFilters, nonexistentIPFilters...) -} diff --git a/internal/schemautil/mutations_test.go b/internal/schemautil/mutations_test.go deleted file mode 100644 index b764d75d7..000000000 --- a/internal/schemautil/mutations_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package schemautil - -import ( - "testing" - - "github.com/google/go-cmp/cmp" -) - -// TestCopySensitiveFields tests the copySensitiveFields function. -func TestCopySensitiveFields(t *testing.T) { - type args struct { - old map[string]interface{} - new map[string]interface{} - } - tests := []struct { - name string - args args - want map[string]interface{} - }{ - { - name: "empty", - args: args{ - old: map[string]interface{}{}, - new: map[string]interface{}{}, - }, - want: map[string]interface{}{}, - }, - { - name: "no sensitive fields", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - }, - new: map[string]interface{}{ - "foo": "bar", - }, - }, - want: map[string]interface{}{ - "foo": "bar", - }, - }, - { - name: "sensitive fields", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - "admin_username": "admin", - "admin_password": "password", - }, - new: map[string]interface{}{ - "foo": "bar", - }, - }, - want: map[string]interface{}{ - "foo": "bar", - "admin_username": "admin", - "admin_password": "password", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - copySensitiveFields(tt.args.old, tt.args.new) - - if !cmp.Equal(tt.args.new, tt.want) { - t.Errorf(cmp.Diff(tt.want, tt.args.new)) - } - }) - } -} - -// TestNormalizeIpFilter tests the normalizeIPFilter function. 
-func TestNormalizeIpFilter(t *testing.T) { - type args struct { - old map[string]interface{} - new map[string]interface{} - } - tests := []struct { - name string - args args - want map[string]interface{} - }{ - { - name: "empty", - args: args{ - old: map[string]interface{}{}, - new: map[string]interface{}{}, - }, - want: map[string]interface{}{}, - }, - { - name: "no ip filter", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - }, - new: map[string]interface{}{ - "foo": "bar", - }, - }, - want: map[string]interface{}{ - "foo": "bar", - }, - }, - { - name: "ip filter", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.8/32", - "1.3.3.7/32", - }, - }, - new: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.7/32", - "1.3.3.8/32", - }, - }, - }, - want: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.8/32", - "1.3.3.7/32", - }, - }, - }, - { - name: "ip filter with remote changes", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.8/32", - "1.3.3.7/32", - }, - }, - new: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.7/32", - "1.3.3.8/32", - "1.3.3.9/32", - }, - }, - }, - want: map[string]interface{}{ - "foo": "bar", - "ip_filter": []interface{}{ - "1.3.3.8/32", - "1.3.3.7/32", - "1.3.3.9/32", - }, - }, - }, - { - name: "ip filter object", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - }, - }, - new: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - }, - }, - }, - want: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - }, - }, - }, - { - name: "ip filter object with remote changes", - args: args{ - old: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - }, - }, - new: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.9/32", - "description": "foo", - }, - }, - }, - }, - want: map[string]interface{}{ - "foo": "bar", - "ip_filter_object": []interface{}{ - map[string]interface{}{ - "network": "1.3.3.8/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.7/32", - "description": "foo", - }, - map[string]interface{}{ - "network": "1.3.3.9/32", - "description": "foo", - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - normalizeIPFilter(tt.args.old, tt.args.new) - - if !cmp.Equal(tt.args.new, tt.want) { - t.Errorf(cmp.Diff(tt.want, tt.args.new)) - } - }) - } -} diff --git a/internal/schemautil/schemautil.go 
b/internal/schemautil/schemautil.go index c32607277..40c934503 100644 --- a/internal/schemautil/schemautil.go +++ b/internal/schemautil/schemautil.go @@ -7,7 +7,6 @@ import ( "regexp" "strconv" "strings" - "time" "github.com/aiven/aiven-go-client/v2" "github.com/docker/go-units" @@ -198,15 +197,6 @@ func TrimSpaceDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool return strings.TrimSpace(old) == strings.TrimSpace(new) } -// ValidateDurationString is a ValidateFunc that ensures a string parses -// as time.Duration format -func ValidateDurationString(v interface{}, k string) (ws []string, errors []error) { - if _, err := time.ParseDuration(v.(string)); err != nil { - errors = append(errors, fmt.Errorf("%q: invalid duration", k)) - } - return -} - // ValidateHumanByteSizeString is a ValidateFunc that ensures a string parses // as units.Bytes format func ValidateHumanByteSizeString(v interface{}, k string) (ws []string, errors []error) { diff --git a/internal/schemautil/schemautil_test.go b/internal/schemautil/schemautil_test.go index 951b48d53..6bb4fed44 100644 --- a/internal/schemautil/schemautil_test.go +++ b/internal/schemautil/schemautil_test.go @@ -8,49 +8,6 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_validateDurationString(t *testing.T) { - type args struct { - v interface{} - k string - } - tests := []struct { - name string - args args - wantWs []string - wantErrors bool - }{ - { - "basic", - args{ - v: "2m", - k: "", - }, - nil, - false, - }, - { - "wrong-duration", - args{ - v: "123qweert", - k: "", - }, - nil, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotWs, gotErrors := ValidateDurationString(tt.args.v, tt.args.k) - if !reflect.DeepEqual(gotWs, tt.wantWs) { - t.Errorf("validateDurationString() gotWs = %v, want %v", gotWs, tt.wantWs) - } - if !(tt.wantErrors == (len(gotErrors) > 0)) { - t.Errorf("validateDurationString() gotErrors = %v", gotErrors) - } - }) - } -} - func Test_splitResourceID(t *testing.T) { type args struct { resourceID string diff --git a/internal/schemautil/userconfig/.gitignore b/internal/schemautil/userconfig/.gitignore deleted file mode 100644 index af18ca955..000000000 --- a/internal/schemautil/userconfig/.gitignore +++ /dev/null @@ -1 +0,0 @@ -!dist/ diff --git a/internal/schemautil/userconfig/apiconvert/fromapi.go b/internal/schemautil/userconfig/apiconvert/fromapi.go deleted file mode 100644 index 9d103c49d..000000000 --- a/internal/schemautil/userconfig/apiconvert/fromapi.go +++ /dev/null @@ -1,253 +0,0 @@ -package apiconvert - -import ( - "fmt" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// hasNestedProperties is a function that returns a map of nested properties and a boolean indicating whether the given -// value has nested properties. 
-func hasNestedProperties( - valueReceived any, - valueAttributes map[string]any, -) (map[string]any, bool) { - var properties map[string]any - var resultOk bool - - valueReceivedAsArray, isArray := valueReceived.([]any) - if !isArray { - return properties, resultOk - } - - for _, value := range valueReceivedAsArray { - if property, isPropertyMap := value.(map[string]any); isPropertyMap && property != nil { - resultOk = true - break - } - } - - if itemsProperty, isArray := valueAttributes["items"].(map[string]any); isArray && resultOk { - if propertyValuesRaw, isPropertyMap := itemsProperty["properties"].(map[string]any); isPropertyMap { - properties = propertyValuesRaw - } - } - - if resultOk && len(properties) == 0 { - resultOk = false - } - - return properties, resultOk -} - -// unsetAPIValue is a function that returns an unset value for a given type. -func unsetAPIValue(valueType string) any { - var unsetValue any - - switch valueType { - case "boolean": - unsetValue = false - case "integer": - unsetValue = 0 - case "number": - unsetValue = float64(0) - case "string": - unsetValue = "" - case "array": - unsetValue = []any{} - } - - return unsetValue -} - -// parsePropertiesFromAPI is a function that returns a map of properties parsed from an API response. -func parsePropertiesFromAPI( - nestedName string, - responseMapping map[string]any, - propertyMapping map[string]any, -) (map[string]any, error) { - propertyMappingCopy := make(map[string]any, len(propertyMapping)) - - for key, value := range propertyMapping { - valueAttributes, isMap := value.(map[string]any) - if !isMap { - return nil, fmt.Errorf("%s...%s: property is not a map", nestedName, key) - } - - _, aivenTypes, err := userconfig.TerraformTypes(userconfig.SlicedString(valueAttributes["type"])) - if err != nil { - return nil, err - } - - if len(aivenTypes) > 1 { - return nil, fmt.Errorf("%s...%s: multiple types", nestedName, key) - } - - typeReceived := aivenTypes[0] - - valueReceived, keyExists := responseMapping[key] - if !keyExists || valueReceived == nil { - if typeReceived == "object" { - continue - } - - valueReceived = unsetAPIValue(typeReceived) - } - - var valueReceivedParsed any - - switch typeReceived { - default: - switch valueReceivedAsArray := valueReceived.(type) { - default: - valueReceivedParsed = valueReceived - case []any: - var list []any - - if valueNestedProperties, isArray := hasNestedProperties(valueReceived, valueAttributes); isArray { - for nestedKey, nestedValue := range valueReceivedAsArray { - nestedValueAlpha, valueIsMap := nestedValue.(map[string]any) - if !valueIsMap { - return nil, fmt.Errorf( - "%s...%s.%d: slice item is not a map", nestedName, key, nestedKey, - ) - } - - propertiesParsed, err := parsePropertiesFromAPI( - nestedName, nestedValueAlpha, valueNestedProperties, - ) - if err != nil { - return nil, err - } - - list = append(list, propertiesParsed) - } - } else { - list = append(list, valueReceivedAsArray...) 
- } - - var nestedTypes []string - - if itemKey, isArray := valueAttributes["items"].(map[string]any); isArray { - if oneOfNumericKey, isArrayNumeric := itemKey["one_of"].([]any); isArrayNumeric { - for _, nestedValue := range oneOfNumericKey { - if nestedValueAlpha, valueIsMap := nestedValue.(map[string]any); valueIsMap { - if nestedValueAlphaType, valueIsString := - nestedValueAlpha["type"].(string); valueIsString { - nestedTypes = append(nestedTypes, nestedValueAlphaType) - } - } - } - } else { - _, nestedTypes, err = userconfig.TerraformTypes(userconfig.SlicedString(itemKey["type"])) - if err != nil { - return nil, err - } - } - } - - if len(nestedTypes) > 1 { - if len(list) > 0 { - listFirstSeries := list[0] - - switch listFirstSeries.(type) { - case bool: - key = fmt.Sprintf("%s_boolean", key) - case int: - key = fmt.Sprintf("%s_integer", key) - case float64: - key = fmt.Sprintf("%s_number", key) - case string: - key = fmt.Sprintf("%s_string", key) - case []any: - key = fmt.Sprintf("%s_array", key) - case map[string]any: - key = fmt.Sprintf("%s_object", key) - default: - return nil, fmt.Errorf("%s...%s: no suffix for given type", nestedName, key) - } - - if key == "ip_filter_string" { - key = "ip_filter" - } - - if key == "namespaces_string" { - key = "namespaces" - } - } else { - for _, nestedValue := range nestedTypes { - trimmedKey := fmt.Sprintf("%s_%s", key, nestedValue) - - if trimmedKey == "ip_filter_string" { - trimmedKey = "ip_filter" - } - - if trimmedKey == "namespaces_string" { - trimmedKey = "namespaces" - } - - propertyMappingCopy[trimmedKey] = list - } - - continue - } - } - - valueReceivedParsed = list - } - case "object": - valueReceivedAsAlpha, valueIsMap := valueReceived.(map[string]any) - if !valueIsMap { - return nil, fmt.Errorf("%s...%s: representation value is not a map", nestedName, key) - } - - nestedValues, keyExists := valueAttributes["properties"] - if !keyExists { - return nil, fmt.Errorf("%s...%s: properties key not found", nestedName, key) - } - - nestedValuesAsAlpha, valueIsMap := nestedValues.(map[string]any) - if !valueIsMap { - return nil, fmt.Errorf("%s...%s: properties value is not a map", nestedName, key) - } - - propertiesParsed, err := parsePropertiesFromAPI(nestedName, valueReceivedAsAlpha, nestedValuesAsAlpha) - if err != nil { - return nil, err - } - - valueReceivedParsed = []map[string]any{propertiesParsed} - } - - propertyMappingCopy[userconfig.EncodeKey(key)] = valueReceivedParsed - } - - return propertyMappingCopy, nil -} - -// FromAPI is a function that returns a slice of properties parsed from an API response. 
-func FromAPI( - schemaType userconfig.SchemaType, - nestedName string, - responseMapping map[string]any, -) ([]map[string]any, error) { - var propertiesParsed []map[string]any - - if len(responseMapping) == 0 { - return propertiesParsed, nil - } - - propertyRequests, _, err := propsReqs(schemaType, nestedName) - if err != nil { - return nil, err - } - - propertyAttributes, err := parsePropertiesFromAPI(nestedName, responseMapping, propertyRequests) - if err != nil { - return nil, err - } - - propertiesParsed = append(propertiesParsed, propertyAttributes) - - return propertiesParsed, nil -} diff --git a/internal/schemautil/userconfig/apiconvert/fromapi_test.go b/internal/schemautil/userconfig/apiconvert/fromapi_test.go deleted file mode 100644 index 007a3e98c..000000000 --- a/internal/schemautil/userconfig/apiconvert/fromapi_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package apiconvert - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// TestFromAPI is a test for FromAPI. -func TestFromAPI(t *testing.T) { - type args struct { - schemaType userconfig.SchemaType - serviceName string - request map[string]any - } - - tests := []struct { - name string - args args - want []map[string]any - }{ - { - name: "boolean", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "m3coordinator_enable_graphite_carbon_ingest": true, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "m3coordinator_enable_graphite_carbon_ingest": true, - "m3db_version": "", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "integer", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "limits": map[string]any{ - "max_recently_queried_series_blocks": 20000, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "limits": []map[string]any{{ - "max_recently_queried_series_blocks": 20000, - "max_recently_queried_series_disk_bytes_read": 0, - "max_recently_queried_series_lookback": "", - "query_docs": 0, - "query_require_exhaustive": false, - "query_series": 0, - }}, - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": "", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "number and object", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "kafka", - request: map[string]any{ - "kafka": map[string]any{ - "log_cleaner_min_cleanable_ratio": 0.5, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "aiven_kafka_topic_messages": false, - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "kafka": []map[string]any{{ - "auto_create_topics_enable": false, - "compression_type": "", - "connections_max_idle_ms": 0, - "default_replication_factor": 0, - "group_initial_rebalance_delay_ms": 0, - "group_max_session_timeout_ms": 0, - "group_min_session_timeout_ms": 0, - "log_cleaner_delete_retention_ms": 0, - "log_cleaner_max_compaction_lag_ms": 0, - 
"log_cleaner_min_cleanable_ratio": 0.5, - "log_cleaner_min_compaction_lag_ms": 0, - "log_cleanup_policy": "", - "log_flush_interval_messages": 0, - "log_flush_interval_ms": 0, - "log_index_interval_bytes": 0, - "log_index_size_max_bytes": 0, - "log_local_retention_bytes": 0, - "log_local_retention_ms": 0, - "log_message_downconversion_enable": false, - "log_message_timestamp_difference_max_ms": 0, - "log_message_timestamp_type": "", - "log_preallocate": false, - "log_retention_bytes": 0, - "log_retention_hours": 0, - "log_retention_ms": 0, - "log_roll_jitter_ms": 0, - "log_roll_ms": 0, - "log_segment_bytes": 0, - "log_segment_delete_delay_ms": 0, - "max_connections_per_ip": 0, - "max_incremental_fetch_session_cache_slots": 0, - "message_max_bytes": 0, - "min_insync_replicas": 0, - "num_partitions": 0, - "offsets_retention_minutes": 0, - "producer_purgatory_purge_interval_requests": 0, - "replica_fetch_max_bytes": 0, - "replica_fetch_response_max_bytes": 0, - "sasl_oauthbearer_expected_audience": "", - "sasl_oauthbearer_expected_issuer": "", - "sasl_oauthbearer_jwks_endpoint_url": "", - "sasl_oauthbearer_sub_claim_name": "", - "socket_request_max_bytes": 0, - "transaction_partition_verification_enable": false, - "transaction_remove_expired_transaction_cleanup_interval_ms": 0, - "transaction_state_log_segment_bytes": 0, - }}, - "kafka_connect": false, - "kafka_rest": false, - "kafka_rest_authorization": false, - "kafka_version": "", - "schema_registry": false, - "service_log": false, - "static_ips": false, - }}, - }, - { - name: "array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": "", - "m3_version": "", - "namespaces": []any{ - map[string]any{ - "name": "default", - "resolution": "", - "type": "unaggregated", - }, - }, - "project_to_fork_from": "", - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "strings in one to many array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "ip_filter": []any{ - "0.0.0.0/0", - "10.20.0.0/16", - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any{ - "0.0.0.0/0", - "10.20.0.0/16", - }, - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": "", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "objects in one to many array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "ip_filter": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter_object": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - }, - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": 
"", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "strings in one to many array via one_of", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - "aggregated_*", - }, - }, - }, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": "", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "rules": []map[string]any{{ - "mapping": []any{ - map[string]any{ - "aggregations": []any(nil), - "drop": false, - "filter": "", - "name": "", - "namespaces": []any{ - "aggregated_*", - }, - "tags": []any(nil), - }, - }, - }}, - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - { - name: "objects in one to many array via one_of", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - request: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - want: []map[string]any{{ - "additional_backup_regions": []any(nil), - "custom_domain": "", - "ip_filter": []any(nil), - "ip_filter_object": []any(nil), - "m3coordinator_enable_graphite_carbon_ingest": false, - "m3db_version": "", - "m3_version": "", - "namespaces": []any(nil), - "project_to_fork_from": "", - "rules": []map[string]any{{ - "mapping": []any{ - map[string]any{ - "aggregations": []any(nil), - "drop": false, - "filter": "", - "name": "", - "namespaces_object": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - "tags": []any(nil), - }, - }, - }}, - "service_log": false, - "service_to_fork_from": "", - "static_ips": false, - }}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, _ := FromAPI(tt.args.schemaType, tt.args.serviceName, tt.args.request) - - if !cmp.Equal(got, tt.want) { - t.Errorf(cmp.Diff(tt.want, got)) - } - }) - } -} diff --git a/internal/schemautil/userconfig/apiconvert/toapi.go b/internal/schemautil/userconfig/apiconvert/toapi.go deleted file mode 100644 index 12a897c59..000000000 --- a/internal/schemautil/userconfig/apiconvert/toapi.go +++ /dev/null @@ -1,482 +0,0 @@ -package apiconvert - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// resourceDatable is an interface that allows to get the resource data from the schema. -// This is needed to be able to test the conversion functions. See schema.ResourceData for more. -type resourceDatable interface { - GetOk(string) (any, bool) - HasChange(string) bool - IsNewResource() bool -} - -var ( - // keyPathEndingInNumberRegExp is a regular expression that matches a string that matches: - // 1. key.1.key2.0.key3.2.key.5 - // 2. key123.0 - // 3. key.1 - // 4. key2.9 - // 5. key..8 - // and does not match: - // 1. key.key2 - // 2. key.01 - // 3. key.abc - // 4. .1 - // 5. key. - keyPathEndingInNumberRegExp = regexp.MustCompile(`.+\.[0-9]$`) - - // dotSeparatedNumberRegExp is a regular expression that matches a string that matches: - // 1. 
.5 (match: .5) - // 2. .9. (match: .9.) - // 3. 0.1 (match: .1) - // 4. key.2 (match: .2) - // 5. 1.2.3 (match: .2.) - // 6. key..8 (match: .8) - // and does not match: - // 1. .123 - // 2. 1. - // 3. 1.. - // 4. .5a - dotSeparatedNumberRegExp = regexp.MustCompile(`\.\d($|\.)`) -) - -// arrayItemToAPI is a function that converts array property of Terraform user configuration schema to API -// compatible format. -func arrayItemToAPI( - serviceName string, - fullKeyPath []string, - arrayKey string, - arrayValues []any, - itemMap map[string]any, - resourceData resourceDatable, -) (any, bool, error) { - var convertedValues []any - - if len(arrayValues) == 0 { - return json.RawMessage("[]"), false, nil - } - - fullKeyString := strings.Join(fullKeyPath, ".") - - // TODO: Remove when this is fixed on backend. - if arrayKey == "additional_backup_regions" { - return convertedValues, true, nil - } - - itemMapItems, ok := itemMap["items"].(map[string]any) - if !ok { - return nil, false, fmt.Errorf("%s (item): items key not found", fullKeyString) - } - - var itemType string - - // If the key has a type suffix, we use it to determine the type of the value. - if userconfig.IsKeyTyped(arrayKey) { - itemType = arrayKey[strings.LastIndexByte(arrayKey, '_')+1:] - - // Find the one_of item that matches the type. - if oneOfItems, ok := itemMapItems["one_of"]; ok { - oneOfItemsSlice, ok := oneOfItems.([]any) - if !ok { - return nil, false, fmt.Errorf("%s (items.one_of): not a slice", fullKeyString) - } - - for i, oneOfItem := range oneOfItemsSlice { - oneOfItemMap, ok := oneOfItem.(map[string]any) - if !ok { - return nil, false, fmt.Errorf("%s (items.one_of.%d): not a map", fullKeyString, i) - } - - if itemTypeValue, ok := oneOfItemMap["type"]; ok && itemTypeValue == itemType { - itemMapItems = oneOfItemMap - - break - } - } - } - } else { - // TODO: Remove this statement and the branch below it with the next major version. - _, ok := itemMapItems["one_of"] - - if arrayKey == "ip_filter" || (ok && arrayKey == "namespaces") { - itemType = "string" - } else { - _, itemTypes, err := userconfig.TerraformTypes(userconfig.SlicedString(itemMapItems["type"])) - if err != nil { - return nil, false, err - } - - if len(itemTypes) > 1 { - return nil, false, fmt.Errorf("%s (type): multiple types", fullKeyString) - } - - itemType = itemTypes[0] - } - } - - for i, arrayValue := range arrayValues { - // We only accept slices there, so we need to nest the value into a slice if the value is of object type. - if itemType == "object" { - arrayValue = []any{arrayValue} - } - - convertedValue, omit, err := itemToAPI( - serviceName, - itemType, - append(fullKeyPath, fmt.Sprintf("%d", i)), - fmt.Sprintf("%s.%d", arrayKey, i), - arrayValue, - itemMapItems, - false, - resourceData, - ) - if err != nil { - return nil, false, err - } - - if !omit { - convertedValues = append(convertedValues, convertedValue) - } - } - - return convertedValues, false, nil -} - -// objectItemToAPI is a function that converts object property of Terraform user configuration schema to API -// compatible format. 
-func objectItemToAPI( - serviceName string, - fullKeyPath []string, - objectValues []any, - itemSchema map[string]any, - resourceData resourceDatable, -) (any, bool, error) { - var result any - - fullKeyString := strings.Join(fullKeyPath, ".") - - firstValue := objectValues[0] - - // Object with only "null" fields becomes nil - // Which can't be cast into a map - if firstValue == nil { - return result, true, nil - } - - firstValueAsMap, ok := firstValue.(map[string]any) - if !ok { - return nil, false, fmt.Errorf("%s: not a map", fullKeyString) - } - - itemProperties, ok := itemSchema["properties"].(map[string]any) - if !ok { - return nil, false, fmt.Errorf("%s (item): properties key not found", fullKeyString) - } - - requiredFields := map[string]struct{}{} - - if schemaRequiredFields, ok := itemSchema["required"].([]any); ok { - requiredFields = userconfig.SliceToKeyedMap(schemaRequiredFields) - } - - if !keyPathEndingInNumberRegExp.MatchString(fullKeyString) { - fullKeyPath = append(fullKeyPath, "0") - } - - result, err := propsToAPI( - serviceName, - fullKeyPath, - firstValueAsMap, - itemProperties, - requiredFields, - resourceData, - ) - if err != nil { - return nil, false, err - } - - return result, false, nil -} - -// itemToAPI is a function that converts property of Terraform user configuration schema to API compatible format. -func itemToAPI( - serviceName string, - itemType string, - fullKeyPath []string, - key string, - value any, - inputMap map[string]any, - isRequired bool, - resourceData resourceDatable, -) (any, bool, error) { - result := value - - fullKeyString := strings.Join(fullKeyPath, ".") - - omitValue := !resourceData.HasChange(fullKeyString) - - if omitValue && len(fullKeyPath) > 3 { - lastDotWithNumberIndex := dotSeparatedNumberRegExp.FindAllStringIndex(fullKeyString, -1) - if lastDotWithNumberIndex != nil { - _, exists := resourceData.GetOk(fullKeyString) - lengthOfMatches := len(lastDotWithNumberIndex) - - if (exists || !reflect.ValueOf(value).IsZero()) && - resourceData.HasChange(fullKeyString[:lastDotWithNumberIndex[lengthOfMatches-(lengthOfMatches-1)][0]]) { - omitValue = false - } - } - } - - if omitValue && isRequired || key == "basic_auth_username" || key == "basic_auth_password" { - omitValue = false - } - - switch itemType { - case "boolean": - if _, ok := value.(bool); !ok { - return nil, false, fmt.Errorf("%s: not a boolean", fullKeyString) - } - case "integer": - if _, ok := value.(int); !ok { - return nil, false, fmt.Errorf("%s: not an integer", fullKeyString) - } - case "number": - if _, ok := value.(float64); !ok { - return nil, false, fmt.Errorf("%s: not a number", fullKeyString) - } - case "string": - if _, ok := value.(string); !ok { - return nil, false, fmt.Errorf("%s: not a string", fullKeyString) - } - case "array", "object": - valueArray, ok := value.([]any) - if !ok { - return nil, false, fmt.Errorf("%s: not a slice", fullKeyString) - } - - if valueArray == nil || omitValue { - return nil, true, nil - } - - if itemType == "array" { - return arrayItemToAPI(serviceName, fullKeyPath, key, valueArray, inputMap, resourceData) - } - - if len(valueArray) == 0 { - return nil, true, nil - } - - return objectItemToAPI(serviceName, fullKeyPath, valueArray, inputMap, resourceData) - default: - return nil, false, fmt.Errorf("%s: unsupported type %s", fullKeyString, itemType) - } - - return result, omitValue, nil -} - -// processManyToOneKeys processes many to one keys by mapping them to their first non-empty value. 
-func processManyToOneKeys(result map[string]any) { - // manyToOneKeyMap maps primary keys to their associated many-to-one keys. - manyToOneKeyMap := make(map[string][]string) - - // Iterate over the result map. - // TODO: Remove all ip_filter and namespaces special cases when these fields are removed. - for key, value := range result { - // If the value is a map, process it recursively. - if valueAsMap, ok := value.(map[string]any); ok { - processManyToOneKeys(valueAsMap) - } - - // Ignore keys that are not typed and are not special keys. - if !userconfig.IsKeyTyped(key) && key != "ip_filter" && key != "namespaces" { - continue - } - - // Extract the real key, which is the key without suffix unless it's a special key. - realKey := key - if key != "ip_filter" && key != "namespaces" { - realKey = key[:strings.LastIndexByte(key, '_')] - } - - // Append the key to its corresponding list in the manyToOneKeyMap map. - manyToOneKeyMap[realKey] = append(manyToOneKeyMap[realKey], key) - } - - // By this stage, the 'manyToOneKeyMap' map takes a form similar to the following: - // map[string][]string{ - // // For 'ip_filter', there are two associated keys in the user configuration. The first non-empty one is used, - // // for instance, if the user shifts from 'ip_filter' to 'ip_filter_object', the latter is preferred. - // "ip_filter": []string{"ip_filter", "ip_filter_object"}, - // // For 'namespaces', only a single key is present in the user configuration, so it's directly used. - // "namespaces": []string{"namespaces"}, - // } - - // Iterate over the many-to-one keys. - for primaryKey, associatedKeys := range manyToOneKeyMap { - var newValue any // The new value for the key. - - wasDeleted := false // Track if any key was deleted in the loop. - - // Attempt to process the values as []any. - for _, associatedKey := range associatedKeys { - if associatedValue, ok := result[associatedKey].([]any); ok && len(associatedValue) > 0 { - newValue = associatedValue - - delete(result, associatedKey) // Delete the processed key-value pair from the result. - - wasDeleted = true - } - } - - // If no key was deleted, attempt to process the values as json.RawMessage. - if !wasDeleted { - for _, associatedKey := range associatedKeys { - if associatedValue, ok := result[associatedKey].(json.RawMessage); ok { - newValue = associatedValue - - delete(result, associatedKey) // Delete the processed key-value pair from the result. - - break - } - } - } - - result[primaryKey] = newValue // Set the new value for the primary key. - } -} - -// propsToAPI is a function that converts properties of Terraform user configuration schema to API compatible format. 
-func propsToAPI( - name string, - fullKeyPath []string, - types map[string]any, - properties map[string]any, - requiredFields map[string]struct{}, - data resourceDatable, -) (map[string]any, error) { - result := make(map[string]any, len(types)) - - fullKeyString := strings.Join(fullKeyPath, ".") - - for typeKey, typeValue := range types { - typeKey = userconfig.DecodeKey(typeKey) - - rawKey := typeKey - - if userconfig.IsKeyTyped(typeKey) { - rawKey = typeKey[:strings.LastIndexByte(typeKey, '_')] - } - - property, ok := properties[rawKey] - if !ok { - return nil, fmt.Errorf("%s.%s: key not found", fullKeyString, typeKey) - } - - if property == nil { - continue - } - - propertyAttributes, ok := property.(map[string]any) - if !ok { - return nil, fmt.Errorf("%s.%s: not a map", fullKeyString, typeKey) - } - - if createOnly, ok := propertyAttributes["create_only"]; ok && createOnly.(bool) && !data.IsNewResource() { - continue - } - - _, attributeTypes, err := userconfig.TerraformTypes(userconfig.SlicedString(propertyAttributes["type"])) - if err != nil { - return nil, err - } - - if len(attributeTypes) > 1 { - return nil, fmt.Errorf("%s.%s.type: multiple types", fullKeyString, typeKey) - } - - _, isRequired := requiredFields[typeKey] - - attributeType := attributeTypes[0] - - convertedValue, omit, err := itemToAPI( - name, - attributeType, - append(fullKeyPath, typeKey), - typeKey, - typeValue, - propertyAttributes, - isRequired, - data, - ) - if err != nil { - return nil, err - } - - if !omit { - result[typeKey] = convertedValue - } - } - - processManyToOneKeys(result) - - return result, nil -} - -// ToAPI is a function that converts filled Terraform user configuration schema to API compatible format. -func ToAPI( - schemaType userconfig.SchemaType, - serviceName string, - resourceData resourceDatable, -) (map[string]any, error) { - var result map[string]any - - fullKeyPath := []string{fmt.Sprintf("%s_user_config", serviceName)} - - terraformConfig, ok := resourceData.GetOk(fullKeyPath[0]) - if !ok || terraformConfig == nil { - return result, nil - } - - configSlice, ok := terraformConfig.([]any) - if !ok { - return nil, fmt.Errorf("%s (%d): not a slice", serviceName, schemaType) - } - - firstConfig := configSlice[0] - if firstConfig == nil { - return result, nil - } - - configMap, ok := firstConfig.(map[string]any) - if !ok { - return nil, fmt.Errorf("%s.0 (%d): not a map", serviceName, schemaType) - } - - properties, requiredProperties, err := propsReqs(schemaType, serviceName) - if err != nil { - return nil, err - } - - result, err = propsToAPI( - serviceName, - append(fullKeyPath, "0"), - configMap, - properties, - requiredProperties, - resourceData, - ) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/internal/schemautil/userconfig/apiconvert/toapi_test.go b/internal/schemautil/userconfig/apiconvert/toapi_test.go deleted file mode 100644 index 056a91ae2..000000000 --- a/internal/schemautil/userconfig/apiconvert/toapi_test.go +++ /dev/null @@ -1,1140 +0,0 @@ -package apiconvert - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// testResourceData is a resourceDatable compatible struct for testing. -type testResourceData struct { - d map[string]any - e map[string]struct{} - c map[string]struct{} - n bool -} - -// newTestResourceData is a constructor for testResourceData. 
-func newTestResourceData( - d map[string]any, - e map[string]struct{}, - c map[string]struct{}, - n bool, -) *testResourceData { - return &testResourceData{d: d, e: e, c: c, n: n} -} - -// GetOk is a test implementation of resourceDatable.GetOk. -func (t *testResourceData) GetOk(k string) (any, bool) { - v := t.d[k] - - _, e := t.e[k] - - return v, e -} - -// HasChange is a test implementation of resourceDatable.HasChange. -func (t *testResourceData) HasChange(k string) bool { - _, ok := t.c[k] - - return ok -} - -// IsNewResource is a test implementation of resourceDatable.IsNewResource. -func (t *testResourceData) IsNewResource() bool { - return t.n -} - -// TestToAPI is a test for ToAPI. -func TestToAPI(t *testing.T) { - type args struct { - schemaType userconfig.SchemaType - serviceName string - d resourceDatable - } - - tests := []struct { - name string - args args - want map[string]any - }{ - { - name: "boolean", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "m3coordinator_enable_graphite_carbon_ingest": true, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.m3coordinator_enable_graphite_carbon_ingest": {}, - }, - false, - ), - }, - want: map[string]any{ - "m3coordinator_enable_graphite_carbon_ingest": true, - }, - }, - { - name: "boolean no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "m3coordinator_enable_graphite_carbon_ingest": true, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "integer", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "limits": []any{ - map[string]any{ - "max_recently_queried_series_blocks": 20000, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.limits": {}, - "m3db_user_config.0.limits.0.max_recently_queried_series_blocks": {}, - }, - false, - ), - }, - want: map[string]any{ - "limits": map[string]any{ - "max_recently_queried_series_blocks": 20000, - }, - }, - }, - { - name: "integer no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "limits": []any{ - map[string]any{ - "max_recently_queried_series_blocks": 20000, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "number and object", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "kafka", - d: newTestResourceData( - map[string]any{ - "kafka_user_config": []any{ - map[string]any{ - "kafka": []any{ - map[string]any{ - "log_cleaner_min_cleanable_ratio": 0.5, - }, - }, - }, - }, - }, - map[string]struct{}{ - "kafka_user_config": {}, - }, - map[string]struct{}{ - "kafka_user_config.0.kafka": {}, - "kafka_user_config.0.kafka.0.log_cleaner_min_cleanable_ratio": {}, - }, - false, - ), - }, - want: map[string]any{ - "kafka": map[string]any{ - "log_cleaner_min_cleanable_ratio": 0.5, - }, - }, - }, - { - name: "number and object no 
changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "kafka", - d: newTestResourceData( - map[string]any{ - "kafka_user_config": []any{ - map[string]any{ - "kafka": []any{ - map[string]any{ - "log_cleaner_min_cleanable_ratio": 0.5, - }, - }, - }, - }, - }, - map[string]struct{}{ - "kafka_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "create_only string", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "project_to_fork_from": "anotherprojectname", - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.project_to_fork_from": {}, - }, - true, - ), - }, - want: map[string]any{ - "project_to_fork_from": "anotherprojectname", - }, - }, - { - name: "create_only string during update", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "project_to_fork_from": "anotherprojectname", - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.project_to_fork_from": {}, - }, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.namespaces": {}, - "m3db_user_config.0.namespaces.0": {}, - "m3db_user_config.0.namespaces.0.name": {}, - "m3db_user_config.0.namespaces.0.type": {}, - }, - false, - ), - }, - want: map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - { - name: "array no changes in one key", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.namespaces.0.name": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.namespaces": {}, - "m3db_user_config.0.namespaces.0": {}, - "m3db_user_config.0.namespaces.0.type": {}, - }, - false, - ), - }, - want: map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - { - name: "array no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "name": "default", - "type": "unaggregated", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "strings in many to one array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter": []any{ - "0.0.0.0/0", - "10.20.0.0/16", - }, - }, - }, - }, - map[string]struct{}{ - 
"m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.ip_filter": {}, - "m3db_user_config.0.ip_filter.0": {}, - "m3db_user_config.0.ip_filter.1": {}, - }, - false, - ), - }, - want: map[string]any{ - "ip_filter": []any{ - "0.0.0.0/0", - "10.20.0.0/16", - }, - }, - }, - { - name: "strings in many to one array no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter": []any{ - "0.0.0.0/0", - "10.20.0.0/16", - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "strings in many to one array unset", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter": []any{}, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.ip_filter": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.ip_filter": {}, - "m3db_user_config.0.ip_filter.0": {}, - "m3db_user_config.0.ip_filter.1": {}, - }, - false, - ), - }, - want: map[string]any{ - "ip_filter": json.RawMessage("[]"), // empty array - }, - }, - { - name: "objects in many to one array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter_object": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.ip_filter_object": {}, - "m3db_user_config.0.ip_filter_object.0": {}, - "m3db_user_config.0.ip_filter_object.0.description": {}, - "m3db_user_config.0.ip_filter_object.0.network": {}, - "m3db_user_config.0.ip_filter_object.1": {}, - "m3db_user_config.0.ip_filter_object.1.description": {}, - "m3db_user_config.0.ip_filter_object.1.network": {}, - }, - false, - ), - }, - want: map[string]any{ - "ip_filter": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - }, - }, - }, - { - name: "objects in many to one array no changes in one element", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter_object": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - map[string]any{ - "description": "foo", - "network": "1.3.3.7/32", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.ip_filter_object.0": {}, - "m3db_user_config.0.ip_filter_object.0.description": {}, - "m3db_user_config.0.ip_filter_object.0.network": {}, - "m3db_user_config.0.ip_filter_object.1": {}, - "m3db_user_config.0.ip_filter_object.1.description": {}, - "m3db_user_config.0.ip_filter_object.1.network": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.ip_filter_object": {}, - "m3db_user_config.0.ip_filter_object.1": {}, - "m3db_user_config.0.ip_filter_object.1.description": {}, - "m3db_user_config.0.ip_filter_object.1.network": {}, - "m3db_user_config.0.ip_filter_object.2": 
{}, - }, - false, - ), - }, - want: map[string]any{ - "ip_filter": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - map[string]any{ - "description": "foo", - "network": "1.3.3.7/32", - }, - }, - }, - }, - { - name: "objects in many to one array no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter_object": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "migration from strings to objects in many to one array", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "ip_filter": []any{}, - "ip_filter_object": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - map[string]any{ - "description": "foo", - "network": "1.3.3.7/32", - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.ip_filter.0": {}, - "m3db_user_config.0.ip_filter.1": {}, - "m3db_user_config.0.ip_filter.2": {}, - "m3db_user_config.0.ip_filter_object.0": {}, - "m3db_user_config.0.ip_filter_object.0.description": {}, - "m3db_user_config.0.ip_filter_object.0.network": {}, - "m3db_user_config.0.ip_filter_object.1": {}, - "m3db_user_config.0.ip_filter_object.1.description": {}, - "m3db_user_config.0.ip_filter_object.1.network": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.ip_filter": {}, - "m3db_user_config.0.ip_filter_object": {}, - "m3db_user_config.0.ip_filter_object.1": {}, - "m3db_user_config.0.ip_filter_object.1.description": {}, - "m3db_user_config.0.ip_filter_object.1.network": {}, - "m3db_user_config.0.ip_filter_object.2": {}, - }, - false, - ), - }, - want: map[string]any{ - "ip_filter": []any{ - map[string]any{ - "description": "test", - "network": "0.0.0.0/0", - }, - map[string]any{ - "description": "", - "network": "10.20.0.0/16", - }, - map[string]any{ - "description": "foo", - "network": "1.3.3.7/32", - }, - }, - }, - }, - { - name: "strings in many to one array via one_of", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - "aggregated_*", - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.rules": {}, - "m3db_user_config.0.rules.0": {}, - "m3db_user_config.0.rules.0.mapping": {}, - "m3db_user_config.0.rules.0.mapping.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces.0": {}, - }, - false, - ), - }, - want: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - "aggregated_*", - }, - }, - }, - }, - }, - }, - { - name: "strings in many to one array via one_of no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: 
"m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - "aggregated_*", - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "objects in many to one array via one_of", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces_object": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.rules": {}, - "m3db_user_config.0.rules.0": {}, - "m3db_user_config.0.rules.0.mapping": {}, - "m3db_user_config.0.rules.0.mapping.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.resolution": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.retention": {}, - }, - false, - ), - }, - want: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - { - name: "objects in many to one array via one_of no changes in one key", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces_object": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.resolution": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.rules": {}, - "m3db_user_config.0.rules.0": {}, - "m3db_user_config.0.rules.0.mapping": {}, - "m3db_user_config.0.rules.0.mapping.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.retention": {}, - }, - false, - ), - }, - want: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - { - name: "objects in many to one array via one_of no changes", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces_object": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - }, - map[string]struct{}{}, - false, - ), - }, - want: map[string]any{}, - }, - { - name: "migration from strings to objects in many to one array via one_of", - args: args{ - schemaType: userconfig.ServiceTypes, - serviceName: "m3db", - d: newTestResourceData( - 
map[string]any{ - "m3db_user_config": []any{ - map[string]any{ - "rules": []any{ - map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{}, - "namespaces_object": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "m3db_user_config": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.resolution": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.retention": {}, - }, - map[string]struct{}{ - "m3db_user_config.0.rules": {}, - "m3db_user_config.0.rules.0": {}, - "m3db_user_config.0.rules.0.mapping": {}, - "m3db_user_config.0.rules.0.mapping.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.resolution": {}, - "m3db_user_config.0.rules.0.mapping.0.namespaces_object.0.retention": {}, - }, - false, - ), - }, - want: map[string]any{ - "rules": map[string]any{ - "mapping": []any{ - map[string]any{ - "namespaces": []any{ - map[string]any{ - "resolution": "30s", - "retention": "48h", - }, - }, - }, - }, - }, - }, - }, - { - name: "required", - args: args{ - schemaType: userconfig.IntegrationEndpointTypes, - serviceName: "rsyslog", - d: newTestResourceData( - map[string]any{ - "rsyslog_user_config": []any{ - map[string]any{ - "format": "rfc5424", - "port": 514, - "server": "rsyslog-server", - "tls": false, - "logline": "some logline", - }, - }, - }, - map[string]struct{}{ - "rsyslog_user_config": {}, - }, - map[string]struct{}{ - "rsyslog_user_config.0.format": {}, - "rsyslog_user_config.0.port": {}, - "rsyslog_user_config.0.server": {}, - "rsyslog_user_config.0.logline": {}, - }, - false, - ), - }, - want: map[string]any{ - "format": "rfc5424", - "port": 514, - "server": "rsyslog-server", - "tls": false, - "logline": "some logline", - }, - }, - { - name: "nested arrays no changes", - args: args{ - schemaType: userconfig.IntegrationTypes, - serviceName: "clickhouse_kafka", - d: newTestResourceData( - map[string]any{ - "clickhouse_kafka_user_config": []any{ - map[string]any{ - "tables": []any{ - map[string]any{ - "name": "foo", - "topics": []any{ - map[string]any{ - "name": "bar", - }, - }, - "columns": []any{ - map[string]any{ - "name": "baz", - "type": "UInt16", - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "clickhouse_kafka_user_config": {}, - }, - map[string]struct{}{ - "clickhouse_kafka_user_config.0.tables": {}, - "clickhouse_kafka_user_config.0.tables.0.topics": {}, - "clickhouse_kafka_user_config.0.tables.0.columns": {}, - }, - true, - ), - }, - want: map[string]any{ - "tables": []any{ - map[string]any{ - "name": "foo", - "topics": []any{ - map[string]any{ - "name": "bar", - }, - }, - "columns": []any{ - map[string]any{ - "name": "baz", - "type": "UInt16", - }, - }, - }, - }, - }, - }, - { - name: "nested arrays change in top level element", - args: args{ - schemaType: userconfig.IntegrationTypes, - serviceName: "clickhouse_kafka", - d: newTestResourceData( - map[string]any{ - "clickhouse_kafka_user_config": []any{ - map[string]any{ - "tables": []any{ - map[string]any{ - "name": "foo", - "topics": []any{ - map[string]any{ - "name": "bar", - }, - }, - "columns": []any{ - map[string]any{ - "name": "baz", - "type": "UInt16", - }, - }, - }, - }, - }, - }, - }, - map[string]struct{}{ - "clickhouse_kafka_user_config": {}, - 
"clickhouse_kafka_user_config.0.tables": {}, - "clickhouse_kafka_user_config.0.tables.0.topics": {}, - "clickhouse_kafka_user_config.0.tables.0.columns": {}, - }, - map[string]struct{}{ - "clickhouse_kafka_user_config.0.tables": {}, - "clickhouse_kafka_user_config.0.tables.0.name": {}, - }, - false, - ), - }, - want: map[string]any{ - "tables": []any{ - map[string]any{ - "name": "foo", - "topics": []any{ - map[string]any{ - "name": "bar", - }, - }, - "columns": []any{ - map[string]any{ - "name": "baz", - "type": "UInt16", - }, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, _ := ToAPI(tt.args.schemaType, tt.args.serviceName, tt.args.d) - - if !cmp.Equal(got, tt.want) { - t.Errorf(cmp.Diff(tt.want, got)) - } - }) - } -} diff --git a/internal/schemautil/userconfig/apiconvert/util.go b/internal/schemautil/userconfig/apiconvert/util.go deleted file mode 100644 index 5a2ee0ec5..000000000 --- a/internal/schemautil/userconfig/apiconvert/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package apiconvert - -import ( - "fmt" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// propsReqs is a function that returns a map of properties and required properties from a given schema type and node -// name. -func propsReqs(schemaType userconfig.SchemaType, nodeName string) (map[string]any, map[string]struct{}, error) { - representationMap, err := userconfig.CachedRepresentationMap(schemaType) - if err != nil { - return nil, nil, err - } - - nodeSchema, exists := representationMap[nodeName] - if !exists { - return nil, nil, fmt.Errorf("no schema found for %s (type %d)", nodeName, schemaType) - } - - schemaAsMap, ok := nodeSchema.(map[string]any) - if !ok { - return nil, nil, fmt.Errorf("schema %s (type %d) is not a map", nodeName, schemaType) - } - - properties, exists := schemaAsMap["properties"] - if !exists { - return nil, nil, fmt.Errorf("no properties found for %s (type %d)", nodeName, schemaType) - } - - propertiesAsMap, ok := properties.(map[string]any) - if !ok { - return nil, nil, fmt.Errorf("properties of schema %s (type %d) are not a map", nodeName, schemaType) - } - - requiredProperties := map[string]struct{}{} - if requiredPropertiesSlice, exists := schemaAsMap["required"].([]any); exists { - requiredProperties = userconfig.SliceToKeyedMap(requiredPropertiesSlice) - } - - return propertiesAsMap, requiredProperties, nil -} diff --git a/internal/schemautil/userconfig/apiconvert/util_test.go b/internal/schemautil/userconfig/apiconvert/util_test.go deleted file mode 100644 index 8dc3a1e73..000000000 --- a/internal/schemautil/userconfig/apiconvert/util_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package apiconvert - -import ( - "math" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/aiven/terraform-provider-aiven/internal/schemautil/userconfig" -) - -// TestPropsReqs is a test for propsReqs. 
-func TestPropsReqs(t *testing.T) { - type args struct { - schemaType userconfig.SchemaType - serviceName string - } - - tests := []struct { - name string - args args - want struct { - wantP map[string]any - wantR map[string]struct{} - } - }{ - { - name: "basic", - args: args{ - schemaType: userconfig.IntegrationEndpointTypes, - serviceName: "rsyslog", - }, - want: struct { - wantP map[string]any - wantR map[string]struct{} - }{ - map[string]any{ - "ca": map[string]any{ - "example": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n", - "max_length": 16384, - "title": "PEM encoded CA certificate", - "type": []any{ - "string", - "null", - }, - }, - "cert": map[string]any{ - "example": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n", - "max_length": 16384, - "title": "PEM encoded client certificate", - "type": []any{ - "string", - "null", - }, - }, - "format": map[string]any{ - "default": "rfc5424", - "enum": []any{ - map[string]any{"value": "rfc5424"}, - map[string]any{"value": "rfc3164"}, - map[string]any{"value": "custom"}, - }, - "example": "rfc5424", - "title": "Message format", - "type": "string", - }, - "key": map[string]any{ - "example": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", - "max_length": 16384, - "title": "PEM encoded client key", - "type": []any{ - "string", - "null", - }, - }, - "logline": map[string]any{ - "example": "<%pri%>%timestamp:::date-rfc3339% %HOSTNAME% %app-name% %msg%", - "max_length": 512, - "min_length": 1, - "pattern": "^[ -~\\t]+$", - "title": "Custom syslog message format", - "type": "string", - }, - "max_message_size": map[string]any{ - "default": "8192", - "example": "8192", - "maximum": float64(math.MaxInt32), - "minimum": 2048, - "title": "Rsyslog max message size", - "type": "integer", - }, - "port": map[string]any{ - "default": "514", - "example": "514", - "maximum": 65535, - "minimum": 1, - "title": "Rsyslog server port", - "type": "integer", - }, - "sd": map[string]any{ - "example": "TOKEN tag=\"LiteralValue\"", - "max_length": 1024, - "title": "Structured data block for log message", - "type": []any{ - "string", - "null", - }, - }, - "server": map[string]any{ - "example": "logs.example.com", - "max_length": 255, - "min_length": 4, - "title": "Rsyslog server IP address or hostname", - "type": "string", - }, - "tls": map[string]any{ - "default": true, - "example": true, - "title": "Require TLS", - "type": "boolean", - }, - }, - map[string]struct{}{ - "format": {}, - "port": {}, - "server": {}, - "tls": {}, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotP, gotR, _ := propsReqs(tt.args.schemaType, tt.args.serviceName) - - if !cmp.Equal(gotP, tt.want.wantP) { - t.Errorf(cmp.Diff(tt.want.wantP, gotP)) - } - - if !cmp.Equal(gotR, tt.want.wantR) { - t.Errorf(cmp.Diff(tt.want.wantR, gotR)) - } - }) - } -} diff --git a/internal/schemautil/userconfig/const.go b/internal/schemautil/userconfig/const.go deleted file mode 100644 index 1bbd9a640..000000000 --- a/internal/schemautil/userconfig/const.go +++ /dev/null @@ -1,9 +0,0 @@ -package userconfig - -const ( - // SchemaPackage is the fully-qualified package name of the schema package. - SchemaPackage = "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - // SchemaUtilPackage is the fully-qualified package name of the schemautil package. 
- SchemaUtilPackage = "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) diff --git a/internal/schemautil/userconfig/convert.go b/internal/schemautil/userconfig/convert.go deleted file mode 100644 index a3ab192c1..000000000 --- a/internal/schemautil/userconfig/convert.go +++ /dev/null @@ -1,102 +0,0 @@ -//nolint:unused -package userconfig - -import ( - "fmt" - "strings" - - "github.com/dave/jennifer/jen" -) - -// convertPropertyToSchema is a function that converts a property to a Terraform schema. -func convertPropertyToSchema( - propertyName string, - propertyAttributes map[string]any, - terraformType string, - addDescription bool, - isRequired bool, -) jen.Dict { - resultDict := jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, terraformType), - } - - if addDescription { - isDeprecated, description := descriptionForProperty(propertyAttributes, terraformType) - - resultDict[jen.Id("Description")] = jen.Lit(description) - - if isDeprecated { - resultDict[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") - } - } - - if isRequired { - resultDict[jen.Id("Required")] = jen.Lit(true) - } else { - resultDict[jen.Id("Optional")] = jen.Lit(true) - - if defaultValue, ok := propertyAttributes["default"]; ok && isTerraformTypePrimitive(terraformType) { - resultDict[jen.Id("Default")] = jen.Lit(defaultValue) - } - } - - if createOnly, ok := propertyAttributes["create_only"]; ok && createOnly.(bool) { - resultDict[jen.Id("ForceNew")] = jen.Lit(true) - } - - if strings.Contains(propertyName, "api_key") || strings.Contains(propertyName, "password") { - resultDict[jen.Id("Sensitive")] = jen.Lit(true) - } - - // TODO: Generate validation rules for generated schema properties, also validate that value is within enum values. - - return resultDict -} - -// convertPropertiesToSchemaMap is a function that converts a map of properties to a map of Terraform schemas. -func convertPropertiesToSchemaMap(properties map[string]any, requiredProperties map[string]struct{}) (jen.Dict, error) { - resultDict := make(jen.Dict, len(properties)) - - for propertyName, propertyValue := range properties { - propertyAttributes, ok := propertyValue.(map[string]any) - if !ok { - continue - } - - terraformTypes, aivenTypes, err := TerraformTypes(SlicedString(propertyAttributes["type"])) - if err != nil { - return nil, err - } - - if len(terraformTypes) > 1 { - return nil, fmt.Errorf("multiple types for %s", propertyName) - } - - terraformType, aivenType := terraformTypes[0], aivenTypes[0] - - _, isRequired := requiredProperties[propertyName] - - var schemaStatements map[string]*jen.Statement - - if isTerraformTypePrimitive(terraformType) { - schemaStatements = handlePrimitiveTypeProperty(propertyName, propertyAttributes, terraformType, isRequired) - } else { - schemaStatements, err = handleAggregateTypeProperty( - propertyName, propertyAttributes, terraformType, aivenType, - ) - if err != nil { - return nil, err - } - } - - if schemaStatements == nil { - continue - } - - for keyName, valueNode := range schemaStatements { - resultDict[jen.Lit(EncodeKey(keyName))] = valueNode - } - } - - return resultDict, nil -} diff --git a/internal/schemautil/userconfig/dist/integration_endpoint_types.go b/internal/schemautil/userconfig/dist/integration_endpoint_types.go deleted file mode 100644 index a83f396c1..000000000 --- a/internal/schemautil/userconfig/dist/integration_endpoint_types.go +++ /dev/null @@ -1,562 +0,0 @@ -// Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT. 
- -package dist - -import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) - -// IntegrationEndpointTypeDatadog is a generated function returning the schema of the datadog IntegrationEndpointType. -func IntegrationEndpointTypeDatadog() *schema.Schema { - s := map[string]*schema.Schema{ - "datadog_api_key": { - Description: "Datadog API key.", - Required: true, - Sensitive: true, - Type: schema.TypeString, - }, - "datadog_tags": { - Description: "Custom tags provided by user.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "comment": { - Description: "Optional tag explanation.", - Optional: true, - Type: schema.TypeString, - }, - "tag": { - Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. Tags with prefix 'aiven-' are reserved for Aiven.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "disable_consumer_stats": { - Description: "Disable consumer group metrics.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_consumer_check_instances": { - Description: "Number of separate instances to fetch kafka consumer statistics with.", - Optional: true, - Type: schema.TypeInt, - }, - "kafka_consumer_stats_timeout": { - Description: "Number of seconds that datadog will wait to get consumer statistics from brokers.", - Optional: true, - Type: schema.TypeInt, - }, - "max_partition_contexts": { - Description: "Maximum number of partition contexts to send.", - Optional: true, - Type: schema.TypeInt, - }, - "site": { - Description: "Datadog intake site. Defaults to datadoghq.com.", - Optional: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "Datadog user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalAwsCloudwatchLogs is a generated function returning the schema of the external_aws_cloudwatch_logs IntegrationEndpointType. -func IntegrationEndpointTypeExternalAwsCloudwatchLogs() *schema.Schema { - s := map[string]*schema.Schema{ - "access_key": { - Description: "AWS access key. Required permissions are logs:CreateLogGroup, logs:CreateLogStream, logs:PutLogEvents and logs:DescribeLogStreams.", - Required: true, - Type: schema.TypeString, - }, - "log_group_name": { - Description: "AWS CloudWatch log group name.", - Optional: true, - Type: schema.TypeString, - }, - "region": { - Description: "AWS region.", - Required: true, - Type: schema.TypeString, - }, - "secret_key": { - Description: "AWS secret key.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalAwsCloudwatchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalAwsCloudwatchMetrics is a generated function returning the schema of the external_aws_cloudwatch_metrics IntegrationEndpointType. -func IntegrationEndpointTypeExternalAwsCloudwatchMetrics() *schema.Schema { - s := map[string]*schema.Schema{ - "access_key": { - Description: "AWS access key. 
Required permissions are cloudwatch:PutMetricData.", - Required: true, - Type: schema.TypeString, - }, - "namespace": { - Description: "AWS CloudWatch Metrics Namespace.", - Required: true, - Type: schema.TypeString, - }, - "region": { - Description: "AWS region.", - Required: true, - Type: schema.TypeString, - }, - "secret_key": { - Description: "AWS secret key.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalAwsCloudwatchMetrics user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalElasticsearchLogs is a generated function returning the schema of the external_elasticsearch_logs IntegrationEndpointType. -func IntegrationEndpointTypeExternalElasticsearchLogs() *schema.Schema { - s := map[string]*schema.Schema{ - "ca": { - Description: "PEM encoded CA certificate.", - Optional: true, - Type: schema.TypeString, - }, - "index_days_max": { - Default: "3", - Description: "Maximum number of days of logs to keep. The default value is `3`.", - Optional: true, - Type: schema.TypeInt, - }, - "index_prefix": { - Description: "Elasticsearch index prefix. The default value is `logs`.", - Required: true, - Type: schema.TypeString, - }, - "timeout": { - Default: "10.0", - Description: "Elasticsearch request timeout limit. The default value is `10.0`.", - Optional: true, - Type: schema.TypeFloat, - }, - "url": { - Description: "Elasticsearch connection URL.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalElasticsearchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalGoogleCloudBigquery is a generated function returning the schema of the external_google_cloud_bigquery IntegrationEndpointType. -func IntegrationEndpointTypeExternalGoogleCloudBigquery() *schema.Schema { - s := map[string]*schema.Schema{ - "project_id": { - Description: "GCP project id.", - Required: true, - Type: schema.TypeString, - }, - "service_account_credentials": { - Description: "This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalGoogleCloudBigquery user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalGoogleCloudLogging is a generated function returning the schema of the external_google_cloud_logging IntegrationEndpointType. 
-func IntegrationEndpointTypeExternalGoogleCloudLogging() *schema.Schema { - s := map[string]*schema.Schema{ - "log_id": { - Description: "Google Cloud Logging log id.", - Required: true, - Type: schema.TypeString, - }, - "project_id": { - Description: "GCP project id.", - Required: true, - Type: schema.TypeString, - }, - "service_account_credentials": { - Description: "This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalGoogleCloudLogging user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalKafka is a generated function returning the schema of the external_kafka IntegrationEndpointType. -func IntegrationEndpointTypeExternalKafka() *schema.Schema { - s := map[string]*schema.Schema{ - "bootstrap_servers": { - Description: "Bootstrap servers.", - Required: true, - Type: schema.TypeString, - }, - "sasl_mechanism": { - Description: "SASL mechanism used for connections to the Kafka server.", - Optional: true, - Type: schema.TypeString, - }, - "sasl_plain_password": { - Description: "Password for SASL PLAIN mechanism in the Kafka server.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "sasl_plain_username": { - Description: "Username for SASL PLAIN mechanism in the Kafka server.", - Optional: true, - Type: schema.TypeString, - }, - "security_protocol": { - Description: "Security protocol.", - Required: true, - Type: schema.TypeString, - }, - "ssl_ca_cert": { - Description: "PEM-encoded CA certificate.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_client_cert": { - Description: "PEM-encoded client certificate.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_client_key": { - Description: "PEM-encoded client key.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_endpoint_identification_algorithm": { - Description: "The endpoint identification algorithm to validate server hostname using server certificate.", - Optional: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalKafka user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalOpensearchLogs is a generated function returning the schema of the external_opensearch_logs IntegrationEndpointType. -func IntegrationEndpointTypeExternalOpensearchLogs() *schema.Schema { - s := map[string]*schema.Schema{ - "ca": { - Description: "PEM encoded CA certificate.", - Optional: true, - Type: schema.TypeString, - }, - "index_days_max": { - Default: "3", - Description: "Maximum number of days of logs to keep. The default value is `3`.", - Optional: true, - Type: schema.TypeInt, - }, - "index_prefix": { - Description: "OpenSearch index prefix. The default value is `logs`.", - Required: true, - Type: schema.TypeString, - }, - "timeout": { - Default: "10.0", - Description: "OpenSearch request timeout limit. 
The default value is `10.0`.", - Optional: true, - Type: schema.TypeFloat, - }, - "url": { - Description: "OpenSearch connection URL.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalOpensearchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalPostgresql is a generated function returning the schema of the external_postgresql IntegrationEndpointType. -func IntegrationEndpointTypeExternalPostgresql() *schema.Schema { - s := map[string]*schema.Schema{ - "default_database": { - Description: "Default database.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server.", - Required: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server.", - Required: true, - Type: schema.TypeInt, - }, - "ssl_client_certificate": { - Default: "", - Description: "Client certificate.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_client_key": { - Default: "", - Description: "Client key.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_mode": { - Default: "verify-full", - Description: "SSL Mode. The default value is `verify-full`.", - Optional: true, - Type: schema.TypeString, - }, - "ssl_root_cert": { - Default: "", - Description: "SSL Root Cert.", - Optional: true, - Type: schema.TypeString, - }, - "username": { - Description: "User name.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalPostgresql user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeExternalSchemaRegistry is a generated function returning the schema of the external_schema_registry IntegrationEndpointType. -func IntegrationEndpointTypeExternalSchemaRegistry() *schema.Schema { - s := map[string]*schema.Schema{ - "authentication": { - Description: "Authentication method.", - Required: true, - Type: schema.TypeString, - }, - "basic_auth_password": { - Description: "Basic authentication password.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "basic_auth_username": { - Description: "Basic authentication user name.", - Optional: true, - Type: schema.TypeString, - }, - "url": { - Description: "Schema Registry URL.", - Required: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "ExternalSchemaRegistry user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeJolokia is a generated function returning the schema of the jolokia IntegrationEndpointType. 
-func IntegrationEndpointTypeJolokia() *schema.Schema { - s := map[string]*schema.Schema{ - "basic_auth_password": { - Description: "Jolokia basic authentication password.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "basic_auth_username": { - Description: "Jolokia basic authentication username.", - Optional: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "Jolokia user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypePrometheus is a generated function returning the schema of the prometheus IntegrationEndpointType. -func IntegrationEndpointTypePrometheus() *schema.Schema { - s := map[string]*schema.Schema{ - "basic_auth_password": { - Description: "Prometheus basic authentication password.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "basic_auth_username": { - Description: "Prometheus basic authentication username.", - Optional: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "Prometheus user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationEndpointTypeRsyslog is a generated function returning the schema of the rsyslog IntegrationEndpointType. -func IntegrationEndpointTypeRsyslog() *schema.Schema { - s := map[string]*schema.Schema{ - "ca": { - Description: "PEM encoded CA certificate.", - Optional: true, - Type: schema.TypeString, - }, - "cert": { - Description: "PEM encoded client certificate.", - Optional: true, - Type: schema.TypeString, - }, - "format": { - Description: "Message format. The default value is `rfc5424`.", - Required: true, - Type: schema.TypeString, - }, - "key": { - Description: "PEM encoded client key.", - Optional: true, - Type: schema.TypeString, - }, - "logline": { - Description: "Custom syslog message format.", - Optional: true, - Type: schema.TypeString, - }, - "max_message_size": { - Default: "8192", - Description: "Rsyslog max message size. The default value is `8192`.", - Optional: true, - Type: schema.TypeInt, - }, - "port": { - Description: "Rsyslog server port. The default value is `514`.", - Required: true, - Type: schema.TypeInt, - }, - "sd": { - Description: "Structured data block for log message.", - Optional: true, - Type: schema.TypeString, - }, - "server": { - Description: "Rsyslog server IP address or hostname.", - Required: true, - Type: schema.TypeString, - }, - "tls": { - Description: "Require TLS. The default value is `true`.", - Required: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Rsyslog user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} diff --git a/internal/schemautil/userconfig/dist/integration_types.go b/internal/schemautil/userconfig/dist/integration_types.go deleted file mode 100644 index de191b448..000000000 --- a/internal/schemautil/userconfig/dist/integration_types.go +++ /dev/null @@ -1,1276 +0,0 @@ -// Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT. 
- -package dist - -import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) - -// IntegrationTypeClickhouseKafka is a generated function returning the schema of the clickhouse_kafka IntegrationType. -func IntegrationTypeClickhouseKafka() *schema.Schema { - s := map[string]*schema.Schema{"tables": { - Description: "Tables to create.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "auto_offset_reset": { - Default: "earliest", - Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.", - Optional: true, - Type: schema.TypeString, - }, - "columns": { - Description: "Table columns.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "name": { - Description: "Column name.", - Required: true, - Type: schema.TypeString, - }, - "type": { - Description: "Column type.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 100, - Optional: true, - Type: schema.TypeList, - }, - "data_format": { - Description: "Message data format. The default value is `JSONEachRow`.", - Required: true, - Type: schema.TypeString, - }, - "date_time_input_format": { - Default: "basic", - Description: "Method to read DateTime from text input formats. The default value is `basic`.", - Optional: true, - Type: schema.TypeString, - }, - "group_name": { - Description: "Kafka consumers group. The default value is `clickhouse`.", - Required: true, - Type: schema.TypeString, - }, - "handle_error_mode": { - Default: "default", - Description: "How to handle errors for Kafka engine. The default value is `default`.", - Optional: true, - Type: schema.TypeString, - }, - "max_block_size": { - Default: "0", - Description: "Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "max_rows_per_message": { - Default: "1", - Description: "The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.", - Optional: true, - Type: schema.TypeInt, - }, - "name": { - Description: "Name of the table.", - Required: true, - Type: schema.TypeString, - }, - "num_consumers": { - Default: "1", - Description: "The number of consumers per table per replica. The default value is `1`.", - Optional: true, - Type: schema.TypeInt, - }, - "poll_max_batch_size": { - Default: "0", - Description: "Maximum amount of messages to be polled in a single Kafka poll. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "skip_broken_messages": { - Default: "0", - Description: "Skip at least this number of broken messages from Kafka topic per block. 
The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "topics": { - Description: "Kafka topics.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"name": { - Description: "Name of the topic.", - Required: true, - Type: schema.TypeString, - }}}, - MaxItems: 100, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 100, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "ClickhouseKafka user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeClickhousePostgresql is a generated function returning the schema of the clickhouse_postgresql IntegrationType. -func IntegrationTypeClickhousePostgresql() *schema.Schema { - s := map[string]*schema.Schema{"databases": { - Description: "Databases to expose.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "database": { - Default: "defaultdb", - Description: "PostgreSQL database to expose. The default value is `defaultdb`.", - Optional: true, - Type: schema.TypeString, - }, - "schema": { - Default: "public", - Description: "PostgreSQL schema to expose. The default value is `public`.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "ClickhousePostgresql user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeDatadog is a generated function returning the schema of the datadog IntegrationType. -func IntegrationTypeDatadog() *schema.Schema { - s := map[string]*schema.Schema{ - "datadog_dbm_enabled": { - Description: "Enable Datadog Database Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "datadog_tags": { - Description: "Custom tags provided by user.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "comment": { - Description: "Optional tag explanation.", - Optional: true, - Type: schema.TypeString, - }, - "tag": { - Description: "Tag format and usage are described here: https://docs.datadoghq.com/getting_started/tagging. 
Tags with prefix 'aiven-' are reserved for Aiven.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "exclude_consumer_groups": { - Description: "List of custom metrics.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "exclude_topics": { - Description: "List of topics to exclude.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "include_consumer_groups": { - Description: "List of custom metrics.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "include_topics": { - Description: "List of topics to include.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "kafka_custom_metrics": { - Description: "List of custom metrics.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "max_jmx_metrics": { - Description: "Maximum number of JMX metrics to send.", - Optional: true, - Type: schema.TypeInt, - }, - "opensearch": { - Description: "Datadog Opensearch Options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "index_stats_enabled": { - Description: "Enable Datadog Opensearch Index Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pending_task_stats_enabled": { - Description: "Enable Datadog Opensearch Pending Task Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pshard_stats_enabled": { - Description: "Enable Datadog Opensearch Primary Shard Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "index_stats_enabled": { - Description: "Enable Datadog Opensearch Index Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pending_task_stats_enabled": { - Description: "Enable Datadog Opensearch Pending Task Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pshard_stats_enabled": { - Description: "Enable Datadog Opensearch Primary Shard Monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "redis": { - Description: "Datadog Redis Options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"command_stats_enabled": { - Default: false, - Description: "Enable command_stats option in the agent's configuration. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"command_stats_enabled": { - Default: false, - Description: "Enable command_stats option in the agent's configuration. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "Datadog user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeExternalAwsCloudwatchLogs is a generated function returning the schema of the external_aws_cloudwatch_logs IntegrationType. 
-func IntegrationTypeExternalAwsCloudwatchLogs() *schema.Schema { - s := map[string]*schema.Schema{"selected_log_fields": { - Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 5, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "ExternalAwsCloudwatchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeExternalAwsCloudwatchMetrics is a generated function returning the schema of the external_aws_cloudwatch_metrics IntegrationType. -func IntegrationTypeExternalAwsCloudwatchMetrics() *schema.Schema { - s := map[string]*schema.Schema{ - "dropped_metrics": { - Description: "Metrics to not send to AWS CloudWatch (takes precedence over extra_metrics).", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "field": { - Description: "Identifier of a value in the metric.", - Required: true, - Type: schema.TypeString, - }, - "metric": { - Description: "Identifier of the metric.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "extra_metrics": { - Description: "Metrics to allow through to AWS CloudWatch (in addition to default metrics).", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "field": { - Description: "Identifier of a value in the metric.", - Required: true, - Type: schema.TypeString, - }, - "metric": { - Description: "Identifier of the metric.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "ExternalAwsCloudwatchMetrics user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeExternalElasticsearchLogs is a generated function returning the schema of the external_elasticsearch_logs IntegrationType. -func IntegrationTypeExternalElasticsearchLogs() *schema.Schema { - s := map[string]*schema.Schema{"selected_log_fields": { - Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 5, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "ExternalElasticsearchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeExternalOpensearchLogs is a generated function returning the schema of the external_opensearch_logs IntegrationType. -func IntegrationTypeExternalOpensearchLogs() *schema.Schema { - s := map[string]*schema.Schema{"selected_log_fields": { - Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 5, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "ExternalOpensearchLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeKafkaConnect is a generated function returning the schema of the kafka_connect IntegrationType. -func IntegrationTypeKafkaConnect() *schema.Schema { - s := map[string]*schema.Schema{"kafka_connect": { - Description: "Kafka Connect service configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "config_storage_topic": { - Description: "The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - "group_id": { - Description: "A unique string that identifies the Connect cluster group this worker belongs to.", - Optional: true, - Type: schema.TypeString, - }, - "offset_storage_topic": { - Description: "The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - "status_storage_topic": { - Description: "The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "config_storage_topic": { - Description: "The name of the topic where connector and task configuration data are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - "group_id": { - Description: "A unique string that identifies the Connect cluster group this worker belongs to.", - Optional: true, - Type: schema.TypeString, - }, - "offset_storage_topic": { - Description: "The name of the topic where connector and task configuration offsets are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - "status_storage_topic": { - Description: "The name of the topic where connector and task configuration status updates are stored.This must be the same for all workers with the same group_id.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }} - - return &schema.Schema{ - Description: "KafkaConnect user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeKafkaLogs is a generated function returning the schema of the kafka_logs IntegrationType. -func IntegrationTypeKafkaLogs() *schema.Schema { - s := map[string]*schema.Schema{ - "kafka_topic": { - Description: "Topic name.", - Required: true, - Type: schema.TypeString, - }, - "selected_log_fields": { - Description: "The list of logging fields that will be sent to the integration logging service. 
The MESSAGE and timestamp fields are always sent.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 5, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "KafkaLogs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeKafkaMirrormaker is a generated function returning the schema of the kafka_mirrormaker IntegrationType. -func IntegrationTypeKafkaMirrormaker() *schema.Schema { - s := map[string]*schema.Schema{ - "cluster_alias": { - Description: "The alias under which the Kafka cluster is known to MirrorMaker. Can contain the following symbols: ASCII alphanumerics, '.', '_', and '-'.", - Optional: true, - Type: schema.TypeString, - }, - "kafka_mirrormaker": { - Description: "Kafka MirrorMaker configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "consumer_fetch_min_bytes": { - Description: "The minimum amount of data the server should return for a fetch request.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The amount of bytes producer can use for buffering data before publishing to broker.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "The linger time (ms) for waiting new data to arrive for publishing.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "The maximum request size in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "consumer_fetch_min_bytes": { - Description: "The minimum amount of data the server should return for a fetch request.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "The batch size in bytes producer will attempt to collect before publishing to broker.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The amount of bytes producer can use for buffering data before publishing to broker.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). 
It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "The linger time (ms) for waiting new data to arrive for publishing.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "The maximum request size in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "KafkaMirrormaker user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeLogs is a generated function returning the schema of the logs IntegrationType. -func IntegrationTypeLogs() *schema.Schema { - s := map[string]*schema.Schema{ - "elasticsearch_index_days_max": { - Default: "3", - Description: "Elasticsearch index retention limit. The default value is `3`.", - Optional: true, - Type: schema.TypeInt, - }, - "elasticsearch_index_prefix": { - Default: "logs", - Description: "Elasticsearch index prefix. The default value is `logs`.", - Optional: true, - Type: schema.TypeString, - }, - "selected_log_fields": { - Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 5, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "Logs user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypeMetrics is a generated function returning the schema of the metrics IntegrationType. -func IntegrationTypeMetrics() *schema.Schema { - s := map[string]*schema.Schema{ - "database": { - Description: "Name of the database where to store metric datapoints. Only affects PostgreSQL destinations. Defaults to 'metrics'. Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", - Optional: true, - Type: schema.TypeString, - }, - "retention_days": { - Description: "Number of days to keep old metrics. Only affects PostgreSQL destinations. Set to 0 for no automatic cleanup. Defaults to 30 days.", - Optional: true, - Type: schema.TypeInt, - }, - "ro_username": { - Description: "Name of a user that can be used to read metrics. This will be used for Grafana integration (if enabled) to prevent Grafana users from making undesired changes. Only affects PostgreSQL destinations. Defaults to 'metrics_reader'. 
Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", - Optional: true, - Type: schema.TypeString, - }, - "source_mysql": { - Description: "Configuration options for metrics where source service is MySQL.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"telegraf": { - Description: "Configuration options for Telegraf MySQL input plugin.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - 
"gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"telegraf": { - Description: "Configuration options for Telegraf MySQL input plugin.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - 
"gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "username": { - Description: "Name of the user used to write metrics. Only affects PostgreSQL destinations. Defaults to 'metrics_writer'. 
Note that this must be the same for all metrics integrations that write data to the same PostgreSQL service.", - Optional: true, - Type: schema.TypeString, - }, - } - - return &schema.Schema{ - Description: "Metrics user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// IntegrationTypePrometheus is a generated function returning the schema of the prometheus IntegrationType. -func IntegrationTypePrometheus() *schema.Schema { - s := map[string]*schema.Schema{"source_mysql": { - Description: "Configuration options for metrics where source service is MySQL.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"telegraf": { - Description: "Configuration options for Telegraf MySQL input plugin.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from 
PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"telegraf": { - Description: "Configuration options for Telegraf MySQL input plugin.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: 
"Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "gather_event_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_file_events_stats": { - Description: "gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_index_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_info_schema_auto_inc": { - Description: "Gather auto_increment columns and max values from information schema.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_innodb_metrics": { - Description: "Gather metrics from INFORMATION_SCHEMA.INNODB_METRICS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_perf_events_statements": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_process_list": { - Description: "Gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_slave_status": { - Description: "Gather metrics from SHOW SLAVE STATUS command output.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_io_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_lock_waits": { - Description: "Gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS.", - Optional: true, - Type: schema.TypeBool, - }, - "gather_table_schema": { - Description: "Gather metrics from INFORMATION_SCHEMA.TABLES.", - Optional: true, - Type: schema.TypeBool, - }, - "perf_events_statements_digest_text_limit": { - Description: "Truncates digest text from perf_events_statements into this many characters.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_limit": { - Description: "Limits metrics from perf_events_statements.", - Optional: true, - Type: schema.TypeInt, - }, - "perf_events_statements_time_limit": { - Description: "Only include perf_events_statements whose last seen is less than this many seconds.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}}, - MaxItems: 1, - Optional: true, - Type: 
schema.TypeList, - }} - - return &schema.Schema{ - Description: "Prometheus user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} diff --git a/internal/schemautil/userconfig/dist/service_types.go b/internal/schemautil/userconfig/dist/service_types.go deleted file mode 100644 index c4d52b442..000000000 --- a/internal/schemautil/userconfig/dist/service_types.go +++ /dev/null @@ -1,8970 +0,0 @@ -// Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT. - -package dist - -import ( - schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - schemautil "github.com/aiven/terraform-provider-aiven/internal/schemautil" -) - -// ServiceTypeCassandra is a generated function returning the schema of the cassandra ServiceType. -func ServiceTypeCassandra() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "backup_hour": { - Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "backup_minute": { - Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "cassandra": { - Description: "cassandra configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "batch_size_fail_threshold_in_kb": { - Description: "Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.", - Optional: true, - Type: schema.TypeInt, - }, - "batch_size_warn_threshold_in_kb": { - Description: "Log a warning message on any multiple-partition batch size exceeding this value.5kb per batch by default.Caution should be taken on increasing the size of this thresholdas it can lead to node instability.", - Optional: true, - Type: schema.TypeInt, - }, - "datacenter": { - Description: "Name of the datacenter to which nodes of this service belong. Can be set only when creating the service.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "batch_size_fail_threshold_in_kb": { - Description: "Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default.", - Optional: true, - Type: schema.TypeInt, - }, - "batch_size_warn_threshold_in_kb": { - Description: "Log a warning message on any multiple-partition batch size exceeding this value.5kb per batch by default.Caution should be taken on increasing the size of this thresholdas it can lead to node instability.", - Optional: true, - Type: schema.TypeInt, - }, - "datacenter": { - Description: "Name of the datacenter to which nodes of this service belong. 
Can be set only when creating the service.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "cassandra_version": { - Description: "Cassandra version.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "migrate_sstableloader": { - Description: "Sets the service into migration mode enabling the sstableloader utility to be used to upload Cassandra data files. Available only on service create.", - Optional: true, - Type: schema.TypeBool, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "service_to_join_with": { - Description: "When bootstrapping, instead of creating a new Cassandra cluster try to join an existing one from another service. Can only be set on service creation.", - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Cassandra user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeClickhouse is a generated function returning the schema of the clickhouse ServiceType. -func ServiceTypeClickhouse() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "clickhouse": { - Description: "Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "clickhouse": { - Description: "Allow clients to connect to clickhouse with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Allow clients to connect to clickhouse_https with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Allow clients to connect to clickhouse_mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "clickhouse": { - Description: "Enable clickhouse.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Enable clickhouse_https.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Enable clickhouse_mysql.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "clickhouse": { - Description: "Enable clickhouse.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Enable clickhouse_https.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Enable clickhouse_mysql.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "clickhouse": { - Description: "Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Allow clients to connect to clickhouse_mysql from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "clickhouse": { - Description: "Allow clients to connect to clickhouse from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_https": { - Description: "Allow clients to connect to clickhouse_https from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "clickhouse_mysql": { - Description: "Allow clients to connect to clickhouse_mysql from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients 
to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Clickhouse user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeDragonfly is a generated function returning the schema of the dragonfly ServiceType. -func ServiceTypeDragonfly() *schema.Schema { - s := map[string]*schema.Schema{ - "cache_mode": { - Default: false, - Description: "Evict entries when getting close to maxmemory limit. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "dragonfly_ssl": { - Default: true, - Description: "Require SSL to access Dragonfly. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "migration": { - Description: "Migrate data from existing server.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dragonfly": { - Description: "Allow clients to connect to dragonfly with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dragonfly": { - Description: "Allow clients to connect to dragonfly with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dragonfly": { - Description: "Enable dragonfly.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dragonfly": { - Description: "Enable dragonfly.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dragonfly": { - Description: "Allow clients to connect to dragonfly from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dragonfly": { - Description: "Allow clients to connect to dragonfly from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Dragonfly user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeElasticsearch is a generated function returning the schema of the elasticsearch ServiceType. -func ServiceTypeElasticsearch() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "disable_replication_factor_adjustment": { - Deprecated: "Usage of this field is discouraged.", - Description: "Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.", - Optional: true, - Type: schema.TypeBool, - }, - "elasticsearch": { - Description: "Elasticsearch settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "action_auto_create_index_enabled": { - Description: "Explicitly allow or block automatic creation of indices. 
Defaults to true.", - Optional: true, - Type: schema.TypeBool, - }, - "action_destructive_requires_name": { - Description: "Require explicit index names when deleting.", - Optional: true, - Type: schema.TypeBool, - }, - "cluster_max_shards_per_node": { - Description: "Controls the number of shards allowed in the cluster per data node.", - Optional: true, - Type: schema.TypeInt, - }, - "cluster_routing_allocation_node_concurrent_recoveries": { - Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "email_sender_name": { - Description: "This should be identical to the Sender name defined in Opensearch dashboards.", - Optional: true, - Type: schema.TypeString, - }, - "email_sender_password": { - Description: "Sender email password for Opensearch alerts to authenticate with SMTP server.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "email_sender_username": { - Description: "Sender email address for Opensearch alerts.", - Optional: true, - Type: schema.TypeString, - }, - "http_max_content_length": { - Description: "Maximum content length for HTTP requests to the Elasticsearch HTTP API, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_header_size": { - Description: "The max size of allowed headers, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_initial_line_length": { - Description: "The max length of an HTTP URL, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_fielddata_cache_size": { - Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_index_buffer_size": { - Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_queries_cache_size": { - Description: "Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other Elasticsearch functionality.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_query_bool_max_clause_count": { - Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_bytes_per_sec": { - Description: "Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_concurrent_file_chunks": { - Description: "Number of file chunks sent in parallel for each recovery. 
Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "override_main_response_version": { - Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", - Optional: true, - Type: schema.TypeBool, - }, - "reindex_remote_whitelist": { - Description: "Whitelisted addresses for reindexing. Changing this value will cause all Elasticsearch instances to restart.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "script_max_compilations_rate": { - Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", - Optional: true, - Type: schema.TypeString, - }, - "search_max_buckets": { - Description: "Maximum number of aggregation buckets allowed in a single response. Elasticsearch default value is used when this is not defined.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_force_merge_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_size": { - Description: "Size for the thread pool. See documentation for exact details. 
Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "action_auto_create_index_enabled": { - Description: "Explicitly allow or block automatic creation of indices. Defaults to true.", - Optional: true, - Type: schema.TypeBool, - }, - "action_destructive_requires_name": { - Description: "Require explicit index names when deleting.", - Optional: true, - Type: schema.TypeBool, - }, - "cluster_max_shards_per_node": { - Description: "Controls the number of shards allowed in the cluster per data node.", - Optional: true, - Type: schema.TypeInt, - }, - "cluster_routing_allocation_node_concurrent_recoveries": { - Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "email_sender_name": { - Description: "This should be identical to the Sender name defined in Opensearch dashboards.", - Optional: true, - Type: schema.TypeString, - }, - "email_sender_password": { - Description: "Sender email password for Opensearch alerts to authenticate with SMTP server.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "email_sender_username": { - Description: "Sender email address for Opensearch alerts.", - Optional: true, - Type: schema.TypeString, - }, - "http_max_content_length": { - Description: "Maximum content length for HTTP requests to the Elasticsearch HTTP API, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_header_size": { - Description: "The max size of allowed headers, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_initial_line_length": { - Description: "The max length of an HTTP URL, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_fielddata_cache_size": { - Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_index_buffer_size": { - Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_queries_cache_size": { - Description: "Percentage value. Default is 10%. Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other Elasticsearch functionality.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_query_bool_max_clause_count": { - Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_bytes_per_sec": { - Description: "Limits total inbound and outbound recovery traffic for each node. 
Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_concurrent_file_chunks": { - Description: "Number of file chunks sent in parallel for each recovery. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "override_main_response_version": { - Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", - Optional: true, - Type: schema.TypeBool, - }, - "reindex_remote_whitelist": { - Description: "Whitelisted addresses for reindexing. Changing this value will cause all Elasticsearch instances to restart.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "script_max_compilations_rate": { - Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", - Optional: true, - Type: schema.TypeString, - }, - "search_max_buckets": { - Description: "Maximum number of aggregation buckets allowed in a single response. Elasticsearch default value is used when this is not defined.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_force_merge_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_queue_size": { - Description: "Size for the thread pool queue. 
See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "elasticsearch_version": { - Description: "Elasticsearch major version.", - Optional: true, - Type: schema.TypeString, - }, - "index_patterns": { - Description: "Index patterns.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "max_index_count": { - Description: "Maximum number of indexes to keep.", - Required: true, - Type: schema.TypeInt, - }, - "pattern": { - Description: "fnmatch pattern.", - Required: true, - Type: schema.TypeString, - }, - "sorting_algorithm": { - Default: "creation_date", - Description: "Deletion sorting algorithm. The default value is `creation_date`.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 512, - Optional: true, - Type: schema.TypeList, - }, - "index_template": { - Description: "Template settings for all new indexes.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "mapping_nested_objects_limit": { - Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_replicas": { - Description: "The number of replicas each primary shard has.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_shards": { - Description: "The number of primary shards that an index should have.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "mapping_nested_objects_limit": { - Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_replicas": { - Description: "The number of replicas each primary shard has.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_shards": { - Description: "The number of primary shards that an index should have.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "keep_index_refresh_interval": { - Description: "Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting up this flag to true.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Kibana settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "elasticsearch_request_timeout": { - Default: "30000", - Description: "Timeout in milliseconds for requests made by Kibana towards Elasticsearch. The default value is `30000`.", - Optional: true, - Type: schema.TypeInt, - }, - "enabled": { - Default: true, - Description: "Enable or disable Kibana. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "max_old_space_size": { - Default: "128", - Description: "Limits the maximum amount of memory (in MiB) the Kibana process can use. This sets the max_old_space_size option of the nodejs running the Kibana. Note: the memory reserved by Kibana is not available for Elasticsearch. The default value is `128`.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "elasticsearch_request_timeout": { - Default: "30000", - Description: "Timeout in milliseconds for requests made by Kibana towards Elasticsearch. The default value is `30000`.", - Optional: true, - Type: schema.TypeInt, - }, - "enabled": { - Default: true, - Description: "Enable or disable Kibana. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "max_old_space_size": { - Default: "128", - Description: "Limits the maximum amount of memory (in MiB) the Kibana process can use. This sets the max_old_space_size option of the nodejs running the Kibana. Note: the memory reserved by Kibana is not available for Elasticsearch. The default value is `128`.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "max_index_count": { - Default: "0", - Deprecated: "Usage of this field is discouraged.", - Description: "Use index_patterns instead. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "opensearch_version": { - Description: "OpenSearch major version.", - Optional: true, - Type: schema.TypeString, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "elasticsearch": { - Description: "Allow clients to connect to elasticsearch with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Allow clients to connect to kibana with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "elasticsearch": { - Description: "Allow clients to connect to elasticsearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Allow clients to connect to kibana with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "elasticsearch": { - Description: "Enable elasticsearch.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Enable kibana.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "elasticsearch": { - Description: "Enable elasticsearch.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Enable kibana.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "elasticsearch": { - Description: "Allow clients to connect to elasticsearch from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Allow clients to connect to kibana from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "elasticsearch": { - Description: "Allow clients to connect to elasticsearch from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "kibana": { - Description: "Allow clients to connect to kibana from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Elasticsearch user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeFlink is a generated function returning the schema of the flink ServiceType. -func ServiceTypeFlink() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "flink_version": { - Description: "Flink major version.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "number_of_task_slots": { - Description: "Task slots per node. For a 3 node plan, total number of task slots is 3x this value.", - Optional: true, - Type: schema.TypeInt, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "flink": { - Description: "Enable flink.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "flink": { - Description: "Enable flink.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Flink user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeGrafana is a generated function returning the schema of the grafana ServiceType. -func ServiceTypeGrafana() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "alerting_enabled": { - Description: "Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.", - Optional: true, - Type: schema.TypeBool, - }, - "alerting_error_or_timeout": { - Description: "Default error or timeout setting for new alerting rules.", - Optional: true, - Type: schema.TypeString, - }, - "alerting_max_annotations_to_keep": { - Description: "Max number of alert annotations that Grafana stores. 
0 (default) keeps all alert annotations.", - Optional: true, - Type: schema.TypeInt, - }, - "alerting_nodata_or_nullvalues": { - Description: "Default value for 'no data or null values' for new alerting rules.", - Optional: true, - Type: schema.TypeString, - }, - "allow_embedding": { - Description: "Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.", - Optional: true, - Type: schema.TypeBool, - }, - "auth_azuread": { - Description: "Azure AD OAuth integration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Allowed domains.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "allowed_groups": { - Description: "Require users to belong to one of given groups.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "auth_url": { - Description: "Authorization URL.", - Required: true, - Type: schema.TypeString, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "token_url": { - Description: "Token URL.", - Required: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Allowed domains.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "allowed_groups": { - Description: "Require users to belong to one of given groups.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "auth_url": { - Description: "Authorization URL.", - Required: true, - Type: schema.TypeString, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "token_url": { - Description: "Token URL.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "auth_basic_enabled": { - Description: "Enable or disable basic authentication form, used by Grafana built-in login.", - Optional: true, - Type: schema.TypeBool, - }, - "auth_generic_oauth": { - Description: "Generic OAuth integration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Allowed domains.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "allowed_organizations": { - Description: "Require user to be member of one of the listed organizations.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "api_url": { - Description: "API 
URL.", - Required: true, - Type: schema.TypeString, - }, - "auth_url": { - Description: "Authorization URL.", - Required: true, - Type: schema.TypeString, - }, - "auto_login": { - Description: "Allow users to bypass the login screen and automatically log in.", - Optional: true, - Type: schema.TypeBool, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "name": { - Description: "Name of the OAuth integration.", - Optional: true, - Type: schema.TypeString, - }, - "scopes": { - Description: "OAuth scopes.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "token_url": { - Description: "Token URL.", - Required: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Allowed domains.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "allowed_organizations": { - Description: "Require user to be member of one of the listed organizations.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "api_url": { - Description: "API URL.", - Required: true, - Type: schema.TypeString, - }, - "auth_url": { - Description: "Authorization URL.", - Required: true, - Type: schema.TypeString, - }, - "auto_login": { - Description: "Allow users to bypass the login screen and automatically log in.", - Optional: true, - Type: schema.TypeBool, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "name": { - Description: "Name of the OAuth integration.", - Optional: true, - Type: schema.TypeString, - }, - "scopes": { - Description: "OAuth scopes.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "token_url": { - Description: "Token URL.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "auth_github": { - Description: "Github Auth integration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_organizations": { - Description: "Require users to belong to one of given organizations.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "auto_login": { - Description: "Allow users to bypass the login screen and automatically log in.", - Optional: true, - Type: schema.TypeBool, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "skip_org_role_sync": { - Description: "Stop automatically syncing user roles.", - Optional: true, - Type: schema.TypeBool, - }, - "team_ids": { - 
Description: "Require users to belong to one of given team IDs.", - Elem: &schema.Schema{Type: schema.TypeInt}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_organizations": { - Description: "Require users to belong to one of given organizations.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "auto_login": { - Description: "Allow users to bypass the login screen and automatically log in.", - Optional: true, - Type: schema.TypeBool, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "skip_org_role_sync": { - Description: "Stop automatically syncing user roles.", - Optional: true, - Type: schema.TypeBool, - }, - "team_ids": { - Description: "Require users to belong to one of given team IDs.", - Elem: &schema.Schema{Type: schema.TypeInt}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "auth_gitlab": { - Description: "GitLab Auth integration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_groups": { - Description: "Require users to belong to one of given groups.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "api_url": { - Description: "API URL. This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - "auth_url": { - Description: "Authorization URL. This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "token_url": { - Description: "Token URL. This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_groups": { - Description: "Require users to belong to one of given groups.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 50, - Optional: true, - Type: schema.TypeList, - }, - "api_url": { - Description: "API URL. This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - "auth_url": { - Description: "Authorization URL. This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - "token_url": { - Description: "Token URL. 
This only needs to be set when using self hosted GitLab.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "auth_google": { - Description: "Google Auth integration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Domains allowed to sign-in to this Grafana.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 64, - Optional: true, - Type: schema.TypeList, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_sign_up": { - Description: "Automatically sign-up users on successful sign-in.", - Optional: true, - Type: schema.TypeBool, - }, - "allowed_domains": { - Description: "Domains allowed to sign-in to this Grafana.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 64, - Optional: true, - Type: schema.TypeList, - }, - "client_id": { - Description: "Client ID from provider.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "Client secret from provider.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "cookie_samesite": { - Description: "Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.", - Optional: true, - Type: schema.TypeString, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "dashboard_previews_enabled": { - Description: "This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.", - Optional: true, - Type: schema.TypeBool, - }, - "dashboards_min_refresh_interval": { - Description: "Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.", - Optional: true, - Type: schema.TypeString, - }, - "dashboards_versions_to_keep": { - Description: "Dashboard versions to keep per dashboard.", - Optional: true, - Type: schema.TypeInt, - }, - "dataproxy_send_user_header": { - Description: "Send 'X-Grafana-User' header to data source.", - Optional: true, - Type: schema.TypeBool, - }, - "dataproxy_timeout": { - Description: "Timeout for data proxy requests in seconds.", - Optional: true, - Type: schema.TypeInt, - }, - "date_formats": { - Description: "Grafana date format specifications.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "default_timezone": { - Description: "Default time zone for user preferences. 
Value 'browser' uses browser local time zone.", - Optional: true, - Type: schema.TypeString, - }, - "full_date": { - Description: "Moment.js style format string for cases where full date is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_day": { - Description: "Moment.js style format string used when a time requiring day accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_hour": { - Description: "Moment.js style format string used when a time requiring hour accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_minute": { - Description: "Moment.js style format string used when a time requiring minute accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_month": { - Description: "Moment.js style format string used when a time requiring month accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_second": { - Description: "Moment.js style format string used when a time requiring second accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_year": { - Description: "Moment.js style format string used when a time requiring year accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "default_timezone": { - Description: "Default time zone for user preferences. Value 'browser' uses browser local time zone.", - Optional: true, - Type: schema.TypeString, - }, - "full_date": { - Description: "Moment.js style format string for cases where full date is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_day": { - Description: "Moment.js style format string used when a time requiring day accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_hour": { - Description: "Moment.js style format string used when a time requiring hour accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_minute": { - Description: "Moment.js style format string used when a time requiring minute accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_month": { - Description: "Moment.js style format string used when a time requiring month accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_second": { - Description: "Moment.js style format string used when a time requiring second accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - "interval_year": { - Description: "Moment.js style format string used when a time requiring year accuracy is shown.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "disable_gravatar": { - Description: "Set to true to disable gravatar. Defaults to false (gravatar is enabled).", - Optional: true, - Type: schema.TypeBool, - }, - "editors_can_admin": { - Description: "Editors can manage folders, teams and dashboards created by them.", - Optional: true, - Type: schema.TypeBool, - }, - "external_image_storage": { - Description: "External image store settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "access_key": { - Description: "S3 access key. 
Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.", - Required: true, - Type: schema.TypeString, - }, - "bucket_url": { - Description: "Bucket URL for S3.", - Required: true, - Type: schema.TypeString, - }, - "provider": { - Description: "Provider type.", - Required: true, - Type: schema.TypeString, - }, - "secret_key": { - Description: "S3 secret key.", - Required: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "access_key": { - Description: "S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.", - Required: true, - Type: schema.TypeString, - }, - "bucket_url": { - Description: "Bucket URL for S3.", - Required: true, - Type: schema.TypeString, - }, - "provider": { - Description: "Provider type.", - Required: true, - Type: schema.TypeString, - }, - "secret_key": { - Description: "S3 secret key.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "google_analytics_ua_id": { - Description: "Google Analytics ID.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "metrics_enabled": { - Description: "Enable Grafana /metrics endpoint.", - Optional: true, - Type: schema.TypeBool, - }, - "oauth_allow_insecure_email_lookup": { - Description: "Enforce user lookup based on email instead of the unique ID provided by the IdP.", - Optional: true, - Type: schema.TypeBool, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"grafana": { - Description: "Allow clients to connect to grafana with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { - Description: "Allow clients to connect to grafana with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"grafana": { - Description: "Enable grafana.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { - Description: "Enable grafana.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"grafana": { - Description: "Allow clients to connect to grafana from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"grafana": { - Description: "Allow clients to connect to grafana from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "smtp_server": { - Description: "SMTP server settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "from_address": { - Description: "Address used for sending emails.", - Required: true, - Type: schema.TypeString, - }, - "from_name": { - Description: "Name used in outgoing emails, defaults to Grafana.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Server hostname or IP.", - Required: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for SMTP authentication.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "SMTP server port.", - Required: true, - Type: schema.TypeInt, - }, - "skip_verify": { - Description: "Skip verifying server certificate. Defaults to false.", - Optional: true, - Type: schema.TypeBool, - }, - "starttls_policy": { - Description: "Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. 
Default is OpportunisticStartTLS.", - Optional: true, - Type: schema.TypeString, - }, - "username": { - Description: "Username for SMTP authentication.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "from_address": { - Description: "Address used for sending emails.", - Required: true, - Type: schema.TypeString, - }, - "from_name": { - Description: "Name used in outgoing emails, defaults to Grafana.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Server hostname or IP.", - Required: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for SMTP authentication.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "SMTP server port.", - Required: true, - Type: schema.TypeInt, - }, - "skip_verify": { - Description: "Skip verifying server certificate. Defaults to false.", - Optional: true, - Type: schema.TypeBool, - }, - "starttls_policy": { - Description: "Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", - Optional: true, - Type: schema.TypeString, - }, - "username": { - Description: "Username for SMTP authentication.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - "unified_alerting_enabled": { - Description: "Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.", - Optional: true, - Type: schema.TypeBool, - }, - "user_auto_assign_org": { - Description: "Auto-assign new users on signup to main organization. Defaults to false.", - Optional: true, - Type: schema.TypeBool, - }, - "user_auto_assign_org_role": { - Description: "Set role for new signups. Defaults to Viewer.", - Optional: true, - Type: schema.TypeString, - }, - "viewers_can_edit": { - Description: "Users with view-only permission can edit but not save dashboards.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Grafana user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeInfluxdb is a generated function returning the schema of the influxdb ServiceType. -func ServiceTypeInfluxdb() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "influxdb": { - Description: "influxdb.conf configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "log_queries_after": { - Description: "The maximum duration in seconds before a query is logged as a slow query. 
Setting this to 0 (the default) will never log slow queries.", - Optional: true, - Type: schema.TypeInt, - }, - "max_connection_limit": { - Description: "Maximum number of connections to InfluxDB. Setting this to 0 (default) means no limit. If using max_connection_limit, it is recommended to set the value to be large enough in order to not block clients unnecessarily.", - Optional: true, - Type: schema.TypeInt, - }, - "max_row_limit": { - Description: "The maximum number of rows returned in a non-chunked query. Setting this to 0 (the default) allows an unlimited number to be returned.", - Optional: true, - Type: schema.TypeInt, - }, - "max_select_buckets": { - Description: "The maximum number of `GROUP BY time()` buckets that can be processed in a query. Setting this to 0 (the default) allows an unlimited number to be processed.", - Optional: true, - Type: schema.TypeInt, - }, - "max_select_point": { - Description: "The maximum number of points that can be processed in a SELECT statement. Setting this to 0 (the default) allows an unlimited number to be processed.", - Optional: true, - Type: schema.TypeInt, - }, - "query_log_enabled": { - Description: "Whether queries should be logged before execution. May log sensitive data contained within a query.", - Optional: true, - Type: schema.TypeBool, - }, - "query_timeout": { - Description: "The maximum duration in seconds before a query is killed. Setting this to 0 (the default) will never kill slow queries.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "log_queries_after": { - Description: "The maximum duration in seconds before a query is logged as a slow query. Setting this to 0 (the default) will never log slow queries.", - Optional: true, - Type: schema.TypeInt, - }, - "max_connection_limit": { - Description: "Maximum number of connections to InfluxDB. Setting this to 0 (default) means no limit. If using max_connection_limit, it is recommended to set the value to be large enough in order to not block clients unnecessarily.", - Optional: true, - Type: schema.TypeInt, - }, - "max_row_limit": { - Description: "The maximum number of rows returned in a non-chunked query. Setting this to 0 (the default) allows an unlimited number to be returned.", - Optional: true, - Type: schema.TypeInt, - }, - "max_select_buckets": { - Description: "The maximum number of `GROUP BY time()` buckets that can be processed in a query. Setting this to 0 (the default) allows an unlimited number to be processed.", - Optional: true, - Type: schema.TypeInt, - }, - "max_select_point": { - Description: "The maximum number of points that can be processed in a SELECT statement. Setting this to 0 (the default) allows an unlimited number to be processed.", - Optional: true, - Type: schema.TypeInt, - }, - "query_log_enabled": { - Description: "Whether queries should be logged before execution. May log sensitive data contained within a query.", - Optional: true, - Type: schema.TypeBool, - }, - "query_timeout": { - Description: "The maximum duration in seconds before a query is killed. Setting this to 0 (the default) will never kill slow queries.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"influxdb": { - Description: "Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { - Description: "Allow clients to connect to influxdb with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"influxdb": { - Description: "Enable influxdb.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { - Description: "Enable influxdb.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"influxdb": { - Description: "Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"influxdb": { - Description: "Allow clients to connect to influxdb from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Influxdb user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeKafka is a generated function returning the schema of the kafka ServiceType. -func ServiceTypeKafka() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "aiven_kafka_topic_messages": { - Description: "Allow access to read Kafka topic messages in the Aiven Console and REST API.", - Optional: true, - Type: schema.TypeBool, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "kafka": { - Description: "Kafka broker configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "auto_create_topics_enable": { - Description: "Enable auto creation of topics.", - Optional: true, - Type: schema.TypeBool, - }, - "compression_type": { - Description: "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", - Optional: true, - Type: schema.TypeString, - }, - "connections_max_idle_ms": { - Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.", - Optional: true, - Type: schema.TypeInt, - }, - "default_replication_factor": { - Description: "Replication factor for autocreated topics.", - Optional: true, - Type: schema.TypeInt, - }, - "group_initial_rebalance_delay_ms": { - Description: "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.", - Optional: true, - Type: schema.TypeInt, - }, - "group_max_session_timeout_ms": { - Description: "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", - Optional: true, - Type: schema.TypeInt, - }, - "group_min_session_timeout_ms": { - Description: "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_delete_retention_ms": { - Description: "How long are delete records retained?.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_max_compaction_lag_ms": { - Description: "The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_min_cleanable_ratio": { - Description: "Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.", - Optional: true, - Type: schema.TypeFloat, - }, - "log_cleaner_min_compaction_lag_ms": { - Description: "The minimum time a message will remain uncompacted in the log. 
Only applicable for logs that are being compacted.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleanup_policy": { - Description: "The default cleanup policy for segments beyond the retention window.", - Optional: true, - Type: schema.TypeString, - }, - "log_flush_interval_messages": { - Description: "The number of messages accumulated on a log partition before messages are flushed to disk.", - Optional: true, - Type: schema.TypeInt, - }, - "log_flush_interval_ms": { - Description: "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.", - Optional: true, - Type: schema.TypeInt, - }, - "log_index_interval_bytes": { - Description: "The interval with which Kafka adds an entry to the offset index.", - Optional: true, - Type: schema.TypeInt, - }, - "log_index_size_max_bytes": { - Description: "The maximum size in bytes of the offset index.", - Optional: true, - Type: schema.TypeInt, - }, - "log_local_retention_bytes": { - Description: "The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.", - Optional: true, - Type: schema.TypeInt, - }, - "log_local_retention_ms": { - Description: "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.", - Optional: true, - Type: schema.TypeInt, - }, - "log_message_downconversion_enable": { - Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .", - Optional: true, - Type: schema.TypeBool, - }, - "log_message_timestamp_difference_max_ms": { - Description: "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.", - Optional: true, - Type: schema.TypeInt, - }, - "log_message_timestamp_type": { - Description: "Define whether the timestamp in the message is message create time or log append time.", - Optional: true, - Type: schema.TypeString, - }, - "log_preallocate": { - Description: "Should pre allocate file when create new segment?.", - Optional: true, - Type: schema.TypeBool, - }, - "log_retention_bytes": { - Description: "The maximum size of the log before deleting messages.", - Optional: true, - Type: schema.TypeInt, - }, - "log_retention_hours": { - Description: "The number of hours to keep a log file before deleting it.", - Optional: true, - Type: schema.TypeInt, - }, - "log_retention_ms": { - Description: "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.", - Optional: true, - Type: schema.TypeInt, - }, - "log_roll_jitter_ms": { - Description: "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used.", - Optional: true, - Type: schema.TypeInt, - }, - "log_roll_ms": { - Description: "The maximum time before a new log segment is rolled out (in milliseconds).", - Optional: true, - Type: schema.TypeInt, - }, - "log_segment_bytes": { - Description: "The maximum size of a single log file.", - Optional: true, - Type: schema.TypeInt, - }, - "log_segment_delete_delay_ms": { - Description: "The amount of time to wait before deleting a file from the filesystem.", - Optional: true, - Type: schema.TypeInt, - }, - "max_connections_per_ip": { - Description: "The maximum number of connections allowed from each ip address (defaults to 2147483647).", - Optional: true, - Type: schema.TypeInt, - }, - "max_incremental_fetch_session_cache_slots": { - Description: "The maximum number of incremental fetch sessions that the broker will maintain.", - Optional: true, - Type: schema.TypeInt, - }, - "message_max_bytes": { - Description: "The maximum size of message that the server can receive.", - Optional: true, - Type: schema.TypeInt, - }, - "min_insync_replicas": { - Description: "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.", - Optional: true, - Type: schema.TypeInt, - }, - "num_partitions": { - Description: "Number of partitions for autocreated topics.", - Optional: true, - Type: schema.TypeInt, - }, - "offsets_retention_minutes": { - Description: "Log retention window in minutes for offsets topic.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_purgatory_purge_interval_requests": { - Description: "The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).", - Optional: true, - Type: schema.TypeInt, - }, - "replica_fetch_max_bytes": { - Description: "The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.", - Optional: true, - Type: schema.TypeInt, - }, - "replica_fetch_response_max_bytes": { - Description: "Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "sasl_oauthbearer_expected_audience": { - Description: "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_expected_issuer": { - Description: "Optional setting for the broker to use to verify that the JWT was created by the expected issuer.", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_jwks_endpoint_url": { - Description: "OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_sub_claim_name": { - Description: "Name of the scope from which to extract the subject claim from the JWT. 
Defaults to sub.", - Optional: true, - Type: schema.TypeString, - }, - "socket_request_max_bytes": { - Description: "The maximum number of bytes in a socket request (defaults to 104857600).", - Optional: true, - Type: schema.TypeInt, - }, - "transaction_partition_verification_enable": { - Description: "Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.", - Optional: true, - Type: schema.TypeBool, - }, - "transaction_remove_expired_transaction_cleanup_interval_ms": { - Description: "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).", - Optional: true, - Type: schema.TypeInt, - }, - "transaction_state_log_segment_bytes": { - Description: "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "auto_create_topics_enable": { - Description: "Enable auto creation of topics.", - Optional: true, - Type: schema.TypeBool, - }, - "compression_type": { - Description: "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", - Optional: true, - Type: schema.TypeString, - }, - "connections_max_idle_ms": { - Description: "Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.", - Optional: true, - Type: schema.TypeInt, - }, - "default_replication_factor": { - Description: "Replication factor for autocreated topics.", - Optional: true, - Type: schema.TypeInt, - }, - "group_initial_rebalance_delay_ms": { - Description: "The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.", - Optional: true, - Type: schema.TypeInt, - }, - "group_max_session_timeout_ms": { - Description: "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", - Optional: true, - Type: schema.TypeInt, - }, - "group_min_session_timeout_ms": { - Description: "The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_delete_retention_ms": { - Description: "How long are delete records retained?.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_max_compaction_lag_ms": { - Description: "The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleaner_min_cleanable_ratio": { - Description: "Controls log compactor frequency. 
Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.", - Optional: true, - Type: schema.TypeFloat, - }, - "log_cleaner_min_compaction_lag_ms": { - Description: "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.", - Optional: true, - Type: schema.TypeInt, - }, - "log_cleanup_policy": { - Description: "The default cleanup policy for segments beyond the retention window.", - Optional: true, - Type: schema.TypeString, - }, - "log_flush_interval_messages": { - Description: "The number of messages accumulated on a log partition before messages are flushed to disk.", - Optional: true, - Type: schema.TypeInt, - }, - "log_flush_interval_ms": { - Description: "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.", - Optional: true, - Type: schema.TypeInt, - }, - "log_index_interval_bytes": { - Description: "The interval with which Kafka adds an entry to the offset index.", - Optional: true, - Type: schema.TypeInt, - }, - "log_index_size_max_bytes": { - Description: "The maximum size in bytes of the offset index.", - Optional: true, - Type: schema.TypeInt, - }, - "log_local_retention_bytes": { - Description: "The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.", - Optional: true, - Type: schema.TypeInt, - }, - "log_local_retention_ms": { - Description: "The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.", - Optional: true, - Type: schema.TypeInt, - }, - "log_message_downconversion_enable": { - Description: "This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .", - Optional: true, - Type: schema.TypeBool, - }, - "log_message_timestamp_difference_max_ms": { - Description: "The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.", - Optional: true, - Type: schema.TypeInt, - }, - "log_message_timestamp_type": { - Description: "Define whether the timestamp in the message is message create time or log append time.", - Optional: true, - Type: schema.TypeString, - }, - "log_preallocate": { - Description: "Should pre allocate file when create new segment?.", - Optional: true, - Type: schema.TypeBool, - }, - "log_retention_bytes": { - Description: "The maximum size of the log before deleting messages.", - Optional: true, - Type: schema.TypeInt, - }, - "log_retention_hours": { - Description: "The number of hours to keep a log file before deleting it.", - Optional: true, - Type: schema.TypeInt, - }, - "log_retention_ms": { - Description: "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.", - Optional: true, - Type: schema.TypeInt, - }, - "log_roll_jitter_ms": { - Description: "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). 
If not set, the value in log.roll.jitter.hours is used.", - Optional: true, - Type: schema.TypeInt, - }, - "log_roll_ms": { - Description: "The maximum time before a new log segment is rolled out (in milliseconds).", - Optional: true, - Type: schema.TypeInt, - }, - "log_segment_bytes": { - Description: "The maximum size of a single log file.", - Optional: true, - Type: schema.TypeInt, - }, - "log_segment_delete_delay_ms": { - Description: "The amount of time to wait before deleting a file from the filesystem.", - Optional: true, - Type: schema.TypeInt, - }, - "max_connections_per_ip": { - Description: "The maximum number of connections allowed from each ip address (defaults to 2147483647).", - Optional: true, - Type: schema.TypeInt, - }, - "max_incremental_fetch_session_cache_slots": { - Description: "The maximum number of incremental fetch sessions that the broker will maintain.", - Optional: true, - Type: schema.TypeInt, - }, - "message_max_bytes": { - Description: "The maximum size of message that the server can receive.", - Optional: true, - Type: schema.TypeInt, - }, - "min_insync_replicas": { - Description: "When a producer sets acks to 'all' (or '-1'), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful.", - Optional: true, - Type: schema.TypeInt, - }, - "num_partitions": { - Description: "Number of partitions for autocreated topics.", - Optional: true, - Type: schema.TypeInt, - }, - "offsets_retention_minutes": { - Description: "Log retention window in minutes for offsets topic.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_purgatory_purge_interval_requests": { - Description: "The purge interval (in number of requests) of the producer request purgatory(defaults to 1000).", - Optional: true, - Type: schema.TypeInt, - }, - "replica_fetch_max_bytes": { - Description: "The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum, if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.", - Optional: true, - Type: schema.TypeInt, - }, - "replica_fetch_response_max_bytes": { - Description: "Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "sasl_oauthbearer_expected_audience": { - Description: "The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_expected_issuer": { - Description: "Optional setting for the broker to use to verify that the JWT was created by the expected issuer.", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_jwks_endpoint_url": { - Description: "OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .", - Optional: true, - Type: schema.TypeString, - }, - "sasl_oauthbearer_sub_claim_name": { - Description: "Name of the scope from which to extract the subject claim from the JWT. 
Defaults to sub.", - Optional: true, - Type: schema.TypeString, - }, - "socket_request_max_bytes": { - Description: "The maximum number of bytes in a socket request (defaults to 104857600).", - Optional: true, - Type: schema.TypeInt, - }, - "transaction_partition_verification_enable": { - Description: "Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.", - Optional: true, - Type: schema.TypeBool, - }, - "transaction_remove_expired_transaction_cleanup_interval_ms": { - Description: "The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).", - Optional: true, - Type: schema.TypeInt, - }, - "transaction_state_log_segment_bytes": { - Description: "The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "kafka_authentication_methods": { - Description: "Kafka authentication methods.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "certificate": { - Default: true, - Description: "Enable certificate/SSL authentication. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "sasl": { - Default: false, - Description: "Enable SASL authentication. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "certificate": { - Default: true, - Description: "Enable certificate/SSL authentication. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "sasl": { - Default: false, - Description: "Enable SASL authentication. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "kafka_connect": { - Default: false, - Description: "Enable Kafka Connect service. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect_config": { - Description: "Kafka Connect configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_fetch_max_bytes": { - Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_isolation_level": { - Description: "Transaction read isolation level. 
read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_max_partition_fetch_bytes": { - Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_interval_ms": { - Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_records": { - Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_interval_ms": { - Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_timeout_ms": { - Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", - Optional: true, - Type: schema.TypeInt, - }, - "scheduled_rebalance_max_delay_ms": { - Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. 
Defaults to 5 minutes.", - Optional: true, - Type: schema.TypeInt, - }, - "session_timeout_ms": { - Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_fetch_max_bytes": { - Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_isolation_level": { - Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_max_partition_fetch_bytes": { - Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_interval_ms": { - Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_records": { - Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_interval_ms": { - Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_timeout_ms": { - Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). 
It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", - Optional: true, - Type: schema.TypeInt, - }, - "scheduled_rebalance_max_delay_ms": { - Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.", - Optional: true, - Type: schema.TypeInt, - }, - "session_timeout_ms": { - Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "kafka_rest": { - Default: false, - Description: "Enable Kafka-REST service. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest_authorization": { - Description: "Enable authorization in Kafka-REST service.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest_config": { - Description: "Kafka REST configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "consumer_enable_auto_commit": { - Default: true, - Description: "If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "consumer_request_max_bytes": { - Default: "67108864", - Description: "Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_request_timeout_ms": { - Default: "1000", - Description: "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", - Optional: true, - Type: schema.TypeInt, - }, - "name_strategy": { - Default: "topic_name", - Description: "Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.", - Optional: true, - Type: schema.TypeString, - }, - "name_strategy_validation": { - Default: true, - Description: "If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "producer_acks": { - Default: "1", - Description: "The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. 
The default value is `1`.", - Optional: true, - Type: schema.TypeString, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Default: "0", - Description: "Wait for up to the given delay to allow batching records together. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Default: "1048576", - Description: "The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.", - Optional: true, - Type: schema.TypeInt, - }, - "simpleconsumer_pool_size_max": { - Default: "25", - Description: "Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "consumer_enable_auto_commit": { - Default: true, - Description: "If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "consumer_request_max_bytes": { - Default: "67108864", - Description: "Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_request_timeout_ms": { - Default: "1000", - Description: "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", - Optional: true, - Type: schema.TypeInt, - }, - "name_strategy": { - Default: "topic_name", - Description: "Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.", - Optional: true, - Type: schema.TypeString, - }, - "name_strategy_validation": { - Default: true, - Description: "If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "producer_acks": { - Default: "1", - Description: "The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.", - Optional: true, - Type: schema.TypeString, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Default: "0", - Description: "Wait for up to the given delay to allow batching records together. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Default: "1048576", - Description: "The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. 
The default value is `1048576`.", - Optional: true, - Type: schema.TypeInt, - }, - "simpleconsumer_pool_size_max": { - Default: "25", - Description: "Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "kafka_version": { - Description: "Kafka major version.", - Optional: true, - Type: schema.TypeString, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "kafka": { - Description: "Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "kafka": { - Description: "Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "jolokia": { - Description: "Enable jolokia.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka": { - Description: "Enable kafka.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Enable kafka_connect.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Enable kafka_rest.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Enable schema_registry.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "jolokia": { - Description: "Enable jolokia.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka": { - Description: "Enable kafka.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Enable kafka_connect.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Enable kafka_rest.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Enable schema_registry.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "kafka": { - Description: "Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "kafka": { - Description: "Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private 
network.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_rest": { - Description: "Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry": { - Description: "Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "schema_registry": { - Default: false, - Description: "Enable Schema-Registry service. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "schema_registry_config": { - Description: "Schema Registry configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "leader_eligibility": { - Description: "If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "topic_name": { - Description: "The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to `_schemas`.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "leader_eligibility": { - Description: "If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "topic_name": { - Description: "The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. 
Defaults to `_schemas`.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - "tiered_storage": { - Description: "Tiered storage configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "enabled": { - Description: "Whether to enable the tiered storage functionality.", - Optional: true, - Type: schema.TypeBool, - }, - "local_cache": { - Description: "Local cache configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"size": { - Description: "Local cache size in bytes.", - Optional: true, - Type: schema.TypeInt, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"size": { - Description: "Local cache size in bytes.", - Optional: true, - Type: schema.TypeInt, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "enabled": { - Description: "Whether to enable the tiered storage functionality.", - Optional: true, - Type: schema.TypeBool, - }, - "local_cache": { - Description: "Local cache configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"size": { - Description: "Local cache size in bytes.", - Optional: true, - Type: schema.TypeInt, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"size": { - Description: "Local cache size in bytes.", - Optional: true, - Type: schema.TypeInt, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - } - - return &schema.Schema{ - Description: "Kafka user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeKafkaConnect is a generated function returning the schema of the kafka_connect ServiceType. -func ServiceTypeKafkaConnect() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "kafka_connect": { - Description: "Kafka Connect configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_fetch_max_bytes": { - Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_isolation_level": { - Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_max_partition_fetch_bytes": { - Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_interval_ms": { - Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_records": { - Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_interval_ms": { - Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_timeout_ms": { - Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. 
A batch size of zero will disable batching entirely (defaults to 16384).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", - Optional: true, - Type: schema.TypeInt, - }, - "scheduled_rebalance_max_delay_ms": { - Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.", - Optional: true, - Type: schema.TypeInt, - }, - "session_timeout_ms": { - Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_fetch_max_bytes": { - Description: "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_isolation_level": { - Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", - Optional: true, - Type: schema.TypeString, - }, - "consumer_max_partition_fetch_bytes": { - Description: "Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. 
.", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_interval_ms": { - Description: "The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).", - Optional: true, - Type: schema.TypeInt, - }, - "consumer_max_poll_records": { - Description: "The maximum number of records returned in a single call to poll() (defaults to 500).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_interval_ms": { - Description: "The interval at which to try committing offsets for tasks (defaults to 60000).", - Optional: true, - Type: schema.TypeInt, - }, - "offset_flush_timeout_ms": { - Description: "Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_batch_size": { - Description: "This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_buffer_memory": { - Description: "The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).", - Optional: true, - Type: schema.TypeInt, - }, - "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", - Optional: true, - Type: schema.TypeString, - }, - "producer_linger_ms": { - Description: "This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.", - Optional: true, - Type: schema.TypeInt, - }, - "producer_max_request_size": { - Description: "This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.", - Optional: true, - Type: schema.TypeInt, - }, - "scheduled_rebalance_max_delay_ms": { - Description: "The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. 
Defaults to 5 minutes.", - Optional: true, - Type: schema.TypeInt, - }, - "session_timeout_ms": { - Description: "The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "jolokia": { - Description: "Enable jolokia.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Enable kafka_connect.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "jolokia": { - Description: "Enable jolokia.", - Optional: true, - Type: schema.TypeBool, - }, - "kafka_connect": { - Description: "Enable kafka_connect.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "kafka_connect": { - Description: "Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.", - 
Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "KafkaConnect user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeKafkaMirrormaker is a generated function returning the schema of the kafka_mirrormaker ServiceType. -func ServiceTypeKafkaMirrormaker() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "kafka_mirrormaker": { - Description: "Kafka MirrorMaker configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "emit_checkpoints_enabled": { - Description: "Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).", - Optional: true, - Type: schema.TypeBool, - }, - "emit_checkpoints_interval_seconds": { - Description: "Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute).", - Optional: true, - Type: schema.TypeInt, - }, - "groups": { - Description: "Consumer groups to replicate. Supports comma-separated group IDs and regexes.", - Optional: true, - Type: schema.TypeString, - }, - "groups_exclude": { - Description: "Exclude groups. Supports comma-separated group IDs and regexes. 
Excludes take precedence over includes.", - Optional: true, - Type: schema.TypeString, - }, - "offset_lag_max": { - Description: "How out-of-sync a remote partition can be before it is resynced.", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_groups_enabled": { - Description: "Whether to periodically check for new consumer groups. Defaults to 'true'.", - Optional: true, - Type: schema.TypeBool, - }, - "refresh_groups_interval_seconds": { - Description: "Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_topics_enabled": { - Description: "Whether to periodically check for new topics and partitions. Defaults to 'true'.", - Optional: true, - Type: schema.TypeBool, - }, - "refresh_topics_interval_seconds": { - Description: "Frequency of topic and partitions refresh in seconds. Defaults to 600 seconds (10 minutes).", - Optional: true, - Type: schema.TypeInt, - }, - "sync_group_offsets_enabled": { - Description: "Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.", - Optional: true, - Type: schema.TypeBool, - }, - "sync_group_offsets_interval_seconds": { - Description: "Frequency at which consumer group offsets are synced (default: 60, every minute).", - Optional: true, - Type: schema.TypeInt, - }, - "sync_topic_configs_enabled": { - Description: "Whether to periodically configure remote topics to match their corresponding upstream topics.", - Optional: true, - Type: schema.TypeBool, - }, - "tasks_max_per_cpu": { - Default: "1", - Description: "'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "emit_checkpoints_enabled": { - Description: "Whether to emit consumer group offset checkpoints to target cluster periodically (default: true).", - Optional: true, - Type: schema.TypeBool, - }, - "emit_checkpoints_interval_seconds": { - Description: "Frequency at which consumer group offset checkpoints are emitted (default: 60, every minute).", - Optional: true, - Type: schema.TypeInt, - }, - "groups": { - Description: "Consumer groups to replicate. Supports comma-separated group IDs and regexes.", - Optional: true, - Type: schema.TypeString, - }, - "groups_exclude": { - Description: "Exclude groups. Supports comma-separated group IDs and regexes. Excludes take precedence over includes.", - Optional: true, - Type: schema.TypeString, - }, - "offset_lag_max": { - Description: "How out-of-sync a remote partition can be before it is resynced.", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_groups_enabled": { - Description: "Whether to periodically check for new consumer groups. Defaults to 'true'.", - Optional: true, - Type: schema.TypeBool, - }, - "refresh_groups_interval_seconds": { - Description: "Frequency of consumer group refresh in seconds. Defaults to 600 seconds (10 minutes).", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_topics_enabled": { - Description: "Whether to periodically check for new topics and partitions. Defaults to 'true'.", - Optional: true, - Type: schema.TypeBool, - }, - "refresh_topics_interval_seconds": { - Description: "Frequency of topic and partitions refresh in seconds. 
Defaults to 600 seconds (10 minutes).", - Optional: true, - Type: schema.TypeInt, - }, - "sync_group_offsets_enabled": { - Description: "Whether to periodically write the translated offsets of replicated consumer groups (in the source cluster) to __consumer_offsets topic in target cluster, as long as no active consumers in that group are connected to the target cluster.", - Optional: true, - Type: schema.TypeBool, - }, - "sync_group_offsets_interval_seconds": { - Description: "Frequency at which consumer group offsets are synced (default: 60, every minute).", - Optional: true, - Type: schema.TypeInt, - }, - "sync_topic_configs_enabled": { - Description: "Whether to periodically configure remote topics to match their corresponding upstream topics.", - Optional: true, - Type: schema.TypeBool, - }, - "tasks_max_per_cpu": { - Default: "1", - Description: "'tasks.max' is set to this multiplied by the number of CPUs in the service. The default value is `1`.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "KafkaMirrormaker user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeM3aggregator is a generated function returning the schema of the m3aggregator ServiceType. -func ServiceTypeM3aggregator() *schema.Schema { - s := map[string]*schema.Schema{ - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "m3_version": { - Deprecated: "Usage of this field is discouraged.", - Description: "M3 major version (deprecated, use m3aggregator_version).", - Optional: true, - Type: schema.TypeString, - }, - "m3aggregator_version": { - Description: "M3 major version (the minimum compatible version).", - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "M3aggregator user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeM3db is a generated function returning the schema of the m3db ServiceType. -func ServiceTypeM3db() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "limits": { - Description: "M3 limits.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "max_recently_queried_series_blocks": { - Description: "The maximum number of blocks that can be read in a given lookback period.", - Optional: true, - Type: schema.TypeInt, - }, - "max_recently_queried_series_disk_bytes_read": { - Description: "The maximum number of disk bytes that can be read in a given lookback period.", - Optional: true, - Type: schema.TypeInt, - }, - "max_recently_queried_series_lookback": { - Description: "The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.", - Optional: true, - Type: schema.TypeString, - }, - "query_docs": { - Description: "The maximum number of docs fetched in single query.", - Optional: true, - Type: schema.TypeInt, - }, - "query_require_exhaustive": { - Description: "When query limits are exceeded, whether to return error or return partial results.", - Optional: true, - Type: schema.TypeBool, - }, - "query_series": { - Description: "The maximum number of series fetched in single query.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "max_recently_queried_series_blocks": { - Description: "The maximum number of blocks that can be read in a given lookback period.", - Optional: true, - Type: schema.TypeInt, - }, - "max_recently_queried_series_disk_bytes_read": { - Description: "The maximum number of disk bytes that can be read in a given lookback period.", - Optional: true, - Type: schema.TypeInt, - }, - "max_recently_queried_series_lookback": { - Description: "The lookback period for 'max_recently_queried_series_blocks' and 'max_recently_queried_series_disk_bytes_read'.", - Optional: true, - Type: schema.TypeString, - }, - "query_docs": { - Description: "The maximum number of docs fetched in single query.", - Optional: true, - Type: schema.TypeInt, - }, - "query_require_exhaustive": { - Description: "When query limits are exceeded, whether to return error or return partial results.", - Optional: true, - Type: schema.TypeBool, - }, - "query_series": { - Description: "The maximum number of series fetched in single query.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "m3": { - Description: "M3 specific configuration options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"tag_options": { - Description: "M3 Tag Options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_tag_name_duplicates": { - Description: "Allows for duplicate tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - "allow_tag_value_empty": { - Description: "Allows for empty tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_tag_name_duplicates": { - Description: "Allows for duplicate tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - "allow_tag_value_empty": { - 
Description: "Allows for empty tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"tag_options": { - Description: "M3 Tag Options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allow_tag_name_duplicates": { - Description: "Allows for duplicate tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - "allow_tag_value_empty": { - Description: "Allows for empty tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allow_tag_name_duplicates": { - Description: "Allows for duplicate tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - "allow_tag_value_empty": { - Description: "Allows for empty tags to appear on series (not allowed by default).", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "m3_version": { - Deprecated: "Usage of this field is discouraged.", - Description: "M3 major version (deprecated, use m3db_version).", - Optional: true, - Type: schema.TypeString, - }, - "m3coordinator_enable_graphite_carbon_ingest": { - Description: "Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.", - Optional: true, - Type: schema.TypeBool, - }, - "m3db_version": { - Description: "M3 major version (the minimum compatible version).", - Optional: true, - Type: schema.TypeString, - }, - "namespaces": { - Description: "List of M3 namespaces.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "name": { - Description: "The name of the namespace.", - Required: true, - Type: schema.TypeString, - }, - "options": { - Description: "Namespace options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "retention_options": { - Description: "Retention options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "block_data_expiry_duration": { - Description: "Controls how long we wait before expiring stale data.", - Optional: true, - Type: schema.TypeString, - }, - "blocksize_duration": { - Description: "Controls how long to keep a block in memory before flushing to a fileset on disk.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_future_duration": { - Description: "Controls how far into the future writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_past_duration": { - Description: "Controls how far into the past writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "retention_period_duration": { - Description: "Controls the duration of time that M3DB will retain data for the namespace.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "block_data_expiry_duration": { - Description: "Controls how long we wait before expiring stale data.", - Optional: true, - Type: schema.TypeString, - }, - "blocksize_duration": { - Description: "Controls how long 
to keep a block in memory before flushing to a fileset on disk.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_future_duration": { - Description: "Controls how far into the future writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_past_duration": { - Description: "Controls how far into the past writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "retention_period_duration": { - Description: "Controls the duration of time that M3DB will retain data for the namespace.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "snapshot_enabled": { - Description: "Controls whether M3DB will create snapshot files for this namespace.", - Optional: true, - Type: schema.TypeBool, - }, - "writes_to_commitlog": { - Description: "Controls whether M3DB will include writes to this namespace in the commitlog.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "retention_options": { - Description: "Retention options.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "block_data_expiry_duration": { - Description: "Controls how long we wait before expiring stale data.", - Optional: true, - Type: schema.TypeString, - }, - "blocksize_duration": { - Description: "Controls how long to keep a block in memory before flushing to a fileset on disk.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_future_duration": { - Description: "Controls how far into the future writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_past_duration": { - Description: "Controls how far into the past writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "retention_period_duration": { - Description: "Controls the duration of time that M3DB will retain data for the namespace.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "block_data_expiry_duration": { - Description: "Controls how long we wait before expiring stale data.", - Optional: true, - Type: schema.TypeString, - }, - "blocksize_duration": { - Description: "Controls how long to keep a block in memory before flushing to a fileset on disk.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_future_duration": { - Description: "Controls how far into the future writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "buffer_past_duration": { - Description: "Controls how far into the past writes to the namespace will be accepted.", - Optional: true, - Type: schema.TypeString, - }, - "retention_period_duration": { - Description: "Controls the duration of time that M3DB will retain data for the namespace.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "snapshot_enabled": { - Description: "Controls whether M3DB will create snapshot files for this namespace.", - Optional: true, - Type: schema.TypeBool, - }, - "writes_to_commitlog": { - Description: "Controls whether M3DB will include writes to this namespace in the commitlog.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "resolution": { - Description: "The resolution for an 
aggregated namespace.", - Optional: true, - Type: schema.TypeString, - }, - "type": { - Description: "The type of aggregation (aggregated/unaggregated).", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 2147483647, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"m3coordinator": { - Description: "Allow clients to connect to m3coordinator with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { - Description: "Allow clients to connect to m3coordinator with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"m3coordinator": { - Description: "Allow clients to connect to m3coordinator from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"m3coordinator": { - Description: "Allow clients to connect to m3coordinator from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "rules": { - Description: "M3 rules.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"mapping": { - Description: "List of M3 mapping rules.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "aggregations": { - Description: "List of aggregations to be applied.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "drop": { - Description: "Only store the derived metric (as specified in the roll-up rules), if any.", - Optional: true, - Type: schema.TypeBool, - }, - "filter": { - Description: "Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.", - Required: true, - Type: schema.TypeString, - }, - "name": { - Description: "The (optional) name of the rule.", - Optional: true, - Type: schema.TypeString, - }, - "namespaces": { - Deprecated: "This will be removed in v5.0.0 and replaced with namespaces_string instead.", - Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. 
Filter the namespace by glob (=wildcards).", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "namespaces_object": { - Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "resolution": { - Description: "The resolution for the matching namespace.", - Optional: true, - Type: schema.TypeString, - }, - "retention": { - Description: "The retention period of the matching namespace.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "namespaces_string": { - Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards).", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "tags": { - Description: "List of tags to be appended to matching metrics.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "name": { - Description: "Name of the tag.", - Required: true, - Type: schema.TypeString, - }, - "value": { - Description: "Value of the tag.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"mapping": { - Description: "List of M3 mapping rules.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "aggregations": { - Description: "List of aggregations to be applied.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "drop": { - Description: "Only store the derived metric (as specified in the roll-up rules), if any.", - Optional: true, - Type: schema.TypeBool, - }, - "filter": { - Description: "Matching metric names with wildcards (using __name__:wildcard) or matching tags and their (optionally wildcarded) values. For value, ! can be used at start of value for negation, and multiple filters can be supplied using space as separator.", - Required: true, - Type: schema.TypeString, - }, - "name": { - Description: "The (optional) name of the rule.", - Optional: true, - Type: schema.TypeString, - }, - "namespaces": { - Deprecated: "This will be removed in v5.0.0 and replaced with namespaces_string instead.", - Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards).", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "namespaces_object": { - Description: "This rule will be used to store the metrics in the given namespace(s). 
If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by exact match of retention period and resolution.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "resolution": { - Description: "The resolution for the matching namespace.", - Optional: true, - Type: schema.TypeString, - }, - "retention": { - Description: "The retention period of the matching namespace.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "namespaces_string": { - Description: "This rule will be used to store the metrics in the given namespace(s). If a namespace is target of rules, the global default aggregation will be automatically disabled. Note that specifying filters that match no namespaces whatsoever will be returned as an error. Filter the namespace by glob (=wildcards).", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - "tags": { - Description: "List of tags to be appended to matching metrics.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "name": { - Description: "Name of the tag.", - Required: true, - Type: schema.TypeString, - }, - "value": { - Description: "Value of the tag.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 10, - Optional: true, - Type: schema.TypeList, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "M3db user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeMysql is a generated function returning the schema of the mysql ServiceType. -func ServiceTypeMysql() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "admin_password": { - Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", - ForceNew: true, - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "admin_username": { - Description: "Custom username for admin user. This must be set only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "backup_hour": { - Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "backup_minute": { - Description: "The minute of an hour when backup for the service is started. 
New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "binlog_retention_period": { - Description: "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", - Optional: true, - Type: schema.TypeInt, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "migration": { - Description: "Migrate data from existing server.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "mysql": { - Description: "mysql.conf configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "connect_timeout": { - Description: "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake.", - Optional: true, - Type: schema.TypeInt, - }, - "default_time_zone": { - Description: "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", - Optional: true, - Type: schema.TypeString, - }, - "group_concat_max_len": { - Description: "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", - Optional: true, - Type: schema.TypeInt, - }, - "information_schema_stats_expiry": { - Description: "The time, in seconds, before cached statistics expire.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_change_buffer_max_size": { - Description: "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_flush_neighbors": { - Description: "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_ft_min_token_size": { - Description: "Minimum length of words that are stored in an InnoDB FULLTEXT index. 
Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_ft_server_stopword_table": { - Description: "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", - Optional: true, - Type: schema.TypeString, - }, - "innodb_lock_wait_timeout": { - Description: "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_log_buffer_size": { - Description: "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_online_alter_log_max_size": { - Description: "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_print_all_deadlocks": { - Description: "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", - Optional: true, - Type: schema.TypeBool, - }, - "innodb_read_io_threads": { - Description: "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_rollback_on_timeout": { - Description: "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeBool, - }, - "innodb_thread_concurrency": { - Description: "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_write_io_threads": { - Description: "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "interactive_timeout": { - Description: "The number of seconds the server waits for activity on an interactive connection before closing it.", - Optional: true, - Type: schema.TypeInt, - }, - "internal_tmp_mem_storage_engine": { - Description: "The storage engine for in-memory internal temporary tables.", - Optional: true, - Type: schema.TypeString, - }, - "long_query_time": { - Description: "The slow query log captures SQL statements that take more than long_query_time seconds to execute. Default is 10s.", - Optional: true, - Type: schema.TypeFloat, - }, - "max_allowed_packet": { - Description: "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).", - Optional: true, - Type: schema.TypeInt, - }, - "max_heap_table_size": { - Description: "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).", - Optional: true, - Type: schema.TypeInt, - }, - "net_buffer_length": { - Description: "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "net_read_timeout": { - Description: "The number of seconds to wait for more data from a connection before aborting the read.", - Optional: true, - Type: schema.TypeInt, - }, - "net_write_timeout": { - Description: "The number of seconds to wait for a block to be written to a connection before aborting the write.", - Optional: true, - Type: schema.TypeInt, - }, - "slow_query_log": { - Description: "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.", - Optional: true, - Type: schema.TypeBool, - }, - "sort_buffer_size": { - Description: "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).", - Optional: true, - Type: schema.TypeInt, - }, - "sql_mode": { - Description: "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.", - Optional: true, - Type: schema.TypeString, - }, - "sql_require_primary_key": { - Description: "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", - Optional: true, - Type: schema.TypeBool, - }, - "tmp_table_size": { - Description: "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).", - Optional: true, - Type: schema.TypeInt, - }, - "wait_timeout": { - Description: "The number of seconds the server waits for activity on a noninteractive connection before closing it.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "connect_timeout": { - Description: "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake.", - Optional: true, - Type: schema.TypeInt, - }, - "default_time_zone": { - Description: "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", - Optional: true, - Type: schema.TypeString, - }, - "group_concat_max_len": { - Description: "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", - Optional: true, - Type: schema.TypeInt, - }, - "information_schema_stats_expiry": { - Description: "The time, in seconds, before cached statistics expire.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_change_buffer_max_size": { - Description: "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_flush_neighbors": { - Description: "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_ft_min_token_size": { - Description: "Minimum length of words that are stored in an InnoDB FULLTEXT index. 
Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_ft_server_stopword_table": { - Description: "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", - Optional: true, - Type: schema.TypeString, - }, - "innodb_lock_wait_timeout": { - Description: "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_log_buffer_size": { - Description: "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_online_alter_log_max_size": { - Description: "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_print_all_deadlocks": { - Description: "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", - Optional: true, - Type: schema.TypeBool, - }, - "innodb_read_io_threads": { - Description: "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_rollback_on_timeout": { - Description: "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeBool, - }, - "innodb_thread_concurrency": { - Description: "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).", - Optional: true, - Type: schema.TypeInt, - }, - "innodb_write_io_threads": { - Description: "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "interactive_timeout": { - Description: "The number of seconds the server waits for activity on an interactive connection before closing it.", - Optional: true, - Type: schema.TypeInt, - }, - "internal_tmp_mem_storage_engine": { - Description: "The storage engine for in-memory internal temporary tables.", - Optional: true, - Type: schema.TypeString, - }, - "long_query_time": { - Description: "The slow query log captures SQL statements that take more than long_query_time seconds to execute. Default is 10s.", - Optional: true, - Type: schema.TypeFloat, - }, - "max_allowed_packet": { - Description: "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).", - Optional: true, - Type: schema.TypeInt, - }, - "max_heap_table_size": { - Description: "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).", - Optional: true, - Type: schema.TypeInt, - }, - "net_buffer_length": { - Description: "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", - Optional: true, - Type: schema.TypeInt, - }, - "net_read_timeout": { - Description: "The number of seconds to wait for more data from a connection before aborting the read.", - Optional: true, - Type: schema.TypeInt, - }, - "net_write_timeout": { - Description: "The number of seconds to wait for a block to be written to a connection before aborting the write.", - Optional: true, - Type: schema.TypeInt, - }, - "slow_query_log": { - Description: "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table. Default is off.", - Optional: true, - Type: schema.TypeBool, - }, - "sort_buffer_size": { - Description: "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K).", - Optional: true, - Type: schema.TypeInt, - }, - "sql_mode": { - Description: "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Aiven default SQL mode (strict, SQL standard compliant) will be assigned.", - Optional: true, - Type: schema.TypeString, - }, - "sql_require_primary_key": { - Description: "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", - Optional: true, - Type: schema.TypeBool, - }, - "tmp_table_size": { - Description: "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M).", - Optional: true, - Type: schema.TypeInt, - }, - "wait_timeout": { - Description: "The number of seconds the server waits for activity on a noninteractive connection before closing it.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "mysql_version": { - Description: "MySQL major version.", - Optional: true, - Type: schema.TypeString, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "mysql": { - Description: "Allow clients to connect to mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Allow clients to connect to mysqlx with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "mysql": { - Description: "Allow clients to connect to mysql with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Allow clients to connect to mysqlx with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "mysql": { - Description: "Enable mysql.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Enable mysqlx.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "mysql": { - Description: "Enable mysql.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Enable mysqlx.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "mysql": { - Description: "Allow clients to connect to mysql from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Allow clients to connect to mysqlx from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "mysql": { - Description: "Allow clients to connect to mysql from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "mysqlx": { - Description: "Allow clients to connect to mysqlx from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_target_time": { - Description: "Recovery target time when forking a service. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Mysql user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeOpensearch is a generated function returning the schema of the opensearch ServiceType. -func ServiceTypeOpensearch() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "custom_domain": { - Description: "Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.", - Optional: true, - Type: schema.TypeString, - }, - "disable_replication_factor_adjustment": { - Deprecated: "Usage of this field is discouraged.", - Description: "Disable automatic replication factor adjustment for multi-node services. By default, Aiven ensures all indexes are replicated at least to two nodes. Note: Due to potential data loss in case of losing a service node, this setting can no longer be activated.", - Optional: true, - Type: schema.TypeBool, - }, - "index_patterns": { - Description: "Index patterns.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "max_index_count": { - Description: "Maximum number of indexes to keep.", - Required: true, - Type: schema.TypeInt, - }, - "pattern": { - Description: "fnmatch pattern.", - Required: true, - Type: schema.TypeString, - }, - "sorting_algorithm": { - Default: "creation_date", - Description: "Deletion sorting algorithm. The default value is `creation_date`.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 512, - Optional: true, - Type: schema.TypeList, - }, - "index_template": { - Description: "Template settings for all new indexes.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "mapping_nested_objects_limit": { - Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_replicas": { - Description: "The number of replicas each primary shard has.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_shards": { - Description: "The number of primary shards that an index should have.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "mapping_nested_objects_limit": { - Description: "The maximum number of nested JSON objects that a single document can contain across all nested types. 
This limit helps to prevent out of memory errors when a document contains too many nested objects. Default is 10000.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_replicas": { - Description: "The number of replicas each primary shard has.", - Optional: true, - Type: schema.TypeInt, - }, - "number_of_shards": { - Description: "The number of primary shards that an index should have.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "keep_index_refresh_interval": { - Description: "Aiven automation resets index.refresh_interval to default value for every index to be sure that indices are always visible to search. If it doesn't fit your case, you can disable this by setting this flag to true.", - Optional: true, - Type: schema.TypeBool, - }, - "max_index_count": { - Default: "0", - Deprecated: "Usage of this field is discouraged.", - Description: "Use index_patterns instead. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "openid": { - Description: "OpenSearch OpenID Connect Configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "client_id": { - Description: "The ID of the OpenID Connect client configured in your IdP. Required.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "The client secret of the OpenID Connect client configured in your IdP. Required.", - Required: true, - Type: schema.TypeString, - }, - "connect_url": { - Description: "The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings.", - Required: true, - Type: schema.TypeString, - }, - "enabled": { - Description: "Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.", - Required: true, - Type: schema.TypeBool, - }, - "header": { - Default: "Authorization", - Description: "HTTP header name of the JWT token. Optional. Default is Authorization. 
The default value is `Authorization`.", - Optional: true, - Type: schema.TypeString, - }, - "jwt_header": { - Description: "The HTTP header that stores the token. Typically the Authorization header with the Bearer schema: Authorization: Bearer . Optional. Default is Authorization.", - Optional: true, - Type: schema.TypeString, - }, - "jwt_url_parameter": { - Description: "If the token is not transmitted in the HTTP header, but as an URL parameter, define the name of the parameter here. Optional.", - Optional: true, - Type: schema.TypeString, - }, - "refresh_rate_limit_count": { - Default: "10", - Description: "The maximum number of unknown key IDs in the time frame. Default is 10. Optional. The default value is `10`.", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_rate_limit_time_window_ms": { - Default: "10000", - Description: "The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. Optional.Default is 10000 (10 seconds). The default value is `10000`.", - Optional: true, - Type: schema.TypeInt, - }, - "roles_key": { - Description: "The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.", - Optional: true, - Type: schema.TypeString, - }, - "scope": { - Description: "The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.", - Optional: true, - Type: schema.TypeString, - }, - "subject_key": { - Description: "The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "client_id": { - Description: "The ID of the OpenID Connect client configured in your IdP. Required.", - Required: true, - Type: schema.TypeString, - }, - "client_secret": { - Description: "The client secret of the OpenID Connect client configured in your IdP. Required.", - Required: true, - Type: schema.TypeString, - }, - "connect_url": { - Description: "The URL of your IdP where the Security plugin can find the OpenID Connect metadata/configuration settings.", - Required: true, - Type: schema.TypeString, - }, - "enabled": { - Description: "Enables or disables OpenID Connect authentication for OpenSearch. When enabled, users can authenticate using OpenID Connect with an Identity Provider. The default value is `true`.", - Required: true, - Type: schema.TypeBool, - }, - "header": { - Default: "Authorization", - Description: "HTTP header name of the JWT token. Optional. Default is Authorization. The default value is `Authorization`.", - Optional: true, - Type: schema.TypeString, - }, - "jwt_header": { - Description: "The HTTP header that stores the token. Typically the Authorization header with the Bearer schema: Authorization: Bearer . Optional. Default is Authorization.", - Optional: true, - Type: schema.TypeString, - }, - "jwt_url_parameter": { - Description: "If the token is not transmitted in the HTTP header, but as an URL parameter, define the name of the parameter here. Optional.", - Optional: true, - Type: schema.TypeString, - }, - "refresh_rate_limit_count": { - Default: "10", - Description: "The maximum number of unknown key IDs in the time frame. Default is 10. Optional. 
The default value is `10`.", - Optional: true, - Type: schema.TypeInt, - }, - "refresh_rate_limit_time_window_ms": { - Default: "10000", - Description: "The time frame to use when checking the maximum number of unknown key IDs, in milliseconds. Optional.Default is 10000 (10 seconds). The default value is `10000`.", - Optional: true, - Type: schema.TypeInt, - }, - "roles_key": { - Description: "The key in the JSON payload that stores the user’s roles. The value of this key must be a comma-separated list of roles. Required only if you want to use roles in the JWT.", - Optional: true, - Type: schema.TypeString, - }, - "scope": { - Description: "The scope of the identity token issued by the IdP. Optional. Default is openid profile email address phone.", - Optional: true, - Type: schema.TypeString, - }, - "subject_key": { - Description: "The key in the JSON payload that stores the user’s name. If not defined, the subject registered claim is used. Most IdP providers use the preferred_username claim. Optional.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "opensearch": { - Description: "OpenSearch settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "action_auto_create_index_enabled": { - Description: "Explicitly allow or block automatic creation of indices. Defaults to true.", - Optional: true, - Type: schema.TypeBool, - }, - "action_destructive_requires_name": { - Description: "Require explicit index names when deleting.", - Optional: true, - Type: schema.TypeBool, - }, - "auth_failure_listeners": { - Description: "Opensearch Security Plugin Settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "internal_authentication_backend_limiting": { - Description: ".", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. 
Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_rate_limiting": { - Description: "IP address rate limiting settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "internal_authentication_backend_limiting": { - Description: ".", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. 
Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_rate_limiting": { - Description: "IP address rate limiting settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: 
"The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "cluster_max_shards_per_node": { - Description: "Controls the number of shards allowed in the cluster per data node.", - Optional: true, - Type: schema.TypeInt, - }, - "cluster_routing_allocation_node_concurrent_recoveries": { - Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "email_sender_name": { - Description: "This should be identical to the Sender name defined in Opensearch dashboards.", - Optional: true, - Type: schema.TypeString, - }, - "email_sender_password": { - Description: "Sender password for Opensearch alerts to authenticate with SMTP server.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "email_sender_username": { - Description: "Sender username for Opensearch alerts.", - Optional: true, - Type: schema.TypeString, - }, - "enable_security_audit": { - Default: false, - Description: "Enable/Disable security audit. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "http_max_content_length": { - Description: "Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_header_size": { - Description: "The max size of allowed headers, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_initial_line_length": { - Description: "The max length of an HTTP URL, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_fielddata_cache_size": { - Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_index_buffer_size": { - Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_max_index_buffer_size": { - Description: "Absolute value. Default is unbound. Doesn't work without indices.memory.index_buffer_size. Maximum amount of heap used for query cache, an absolute indices.memory.index_buffer_size maximum hard limit.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_min_index_buffer_size": { - Description: "Absolute value. Default is 48mb. Doesn't work without indices.memory.index_buffer_size. Minimum amount of heap used for query cache, an absolute indices.memory.index_buffer_size minimal hard limit.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_queries_cache_size": { - Description: "Percentage value. Default is 10%. 
Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_query_bool_max_clause_count": { - Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_bytes_per_sec": { - Description: "Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_concurrent_file_chunks": { - Description: "Number of file chunks sent in parallel for each recovery. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_enabled": { - Default: true, - Description: "Specifies whether ISM is enabled or not. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "ism_history_enabled": { - Default: true, - Description: "Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "ism_history_max_age": { - Default: "24", - Description: "The maximum age before rolling over the audit history index in hours. The default value is `24`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_max_docs": { - Default: "2500000", - Description: "The maximum number of documents before rolling over the audit history index. The default value is `2500000`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_rollover_check_period": { - Default: "8", - Description: "The time between rollover checks for the audit history index in hours. The default value is `8`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_rollover_retention_period": { - Default: "30", - Description: "How long audit history indices are kept in days. The default value is `30`.", - Optional: true, - Type: schema.TypeInt, - }, - "override_main_response_version": { - Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", - Optional: true, - Type: schema.TypeBool, - }, - "plugins_alerting_filter_by_backend_roles": { - Description: "Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.", - Optional: true, - Type: schema.TypeBool, - }, - "reindex_remote_whitelist": { - Description: "Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "script_max_compilations_rate": { - Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", - Optional: true, - Type: schema.TypeString, - }, - "search_max_buckets": { - Description: "Maximum number of aggregation buckets allowed in a single response. 
OpenSearch default value is used when this is not defined.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_force_merge_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "action_auto_create_index_enabled": { - Description: "Explicitly allow or block automatic creation of indices. 
Defaults to true.", - Optional: true, - Type: schema.TypeBool, - }, - "action_destructive_requires_name": { - Description: "Require explicit index names when deleting.", - Optional: true, - Type: schema.TypeBool, - }, - "auth_failure_listeners": { - Description: "Opensearch Security Plugin Settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "internal_authentication_backend_limiting": { - Description: ".", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. 
Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_rate_limiting": { - Description: "IP address rate limiting settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "internal_authentication_backend_limiting": { - Description: ".", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. 
Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "authentication_backend": { - Description: "The internal backend. Enter `internal`.", - Optional: true, - Type: schema.TypeString, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_rate_limiting": { - Description: "IP address rate limiting settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: "The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "allowed_tries": { - Description: "The number of login attempts allowed before login is blocked.", - Optional: true, - Type: schema.TypeInt, - }, - "block_expiry_seconds": { - Description: "The duration of time that login remains blocked after a failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "max_blocked_clients": { - Description: "The maximum number of blocked IP addresses.", - Optional: true, - Type: schema.TypeInt, - }, - "max_tracked_clients": { - Description: 
"The maximum number of tracked IP addresses that have failed login.", - Optional: true, - Type: schema.TypeInt, - }, - "time_window_seconds": { - Description: "The window of time in which the value for `allowed_tries` is enforced.", - Optional: true, - Type: schema.TypeInt, - }, - "type": { - Description: "The type of rate limiting.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "cluster_max_shards_per_node": { - Description: "Controls the number of shards allowed in the cluster per data node.", - Optional: true, - Type: schema.TypeInt, - }, - "cluster_routing_allocation_node_concurrent_recoveries": { - Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "email_sender_name": { - Description: "This should be identical to the Sender name defined in Opensearch dashboards.", - Optional: true, - Type: schema.TypeString, - }, - "email_sender_password": { - Description: "Sender password for Opensearch alerts to authenticate with SMTP server.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "email_sender_username": { - Description: "Sender username for Opensearch alerts.", - Optional: true, - Type: schema.TypeString, - }, - "enable_security_audit": { - Default: false, - Description: "Enable/Disable security audit. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "http_max_content_length": { - Description: "Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_header_size": { - Description: "The max size of allowed headers, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "http_max_initial_line_length": { - Description: "The max length of an HTTP URL, in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_fielddata_cache_size": { - Description: "Relative amount. Maximum amount of heap memory used for field data cache. This is an expert setting; decreasing the value too much will increase overhead of loading field data; too much memory used for field data cache will decrease amount of heap available for other operations.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_index_buffer_size": { - Description: "Percentage value. Default is 10%. Total amount of heap used for indexing buffer, before writing segments to disk. This is an expert setting. Too low value will slow down indexing; too high value will increase indexing performance but causes performance issues for query performance.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_max_index_buffer_size": { - Description: "Absolute value. Default is unbound. Doesn't work without indices.memory.index_buffer_size. Maximum amount of heap used for query cache, an absolute indices.memory.index_buffer_size maximum hard limit.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_memory_min_index_buffer_size": { - Description: "Absolute value. Default is 48mb. Doesn't work without indices.memory.index_buffer_size. Minimum amount of heap used for query cache, an absolute indices.memory.index_buffer_size minimal hard limit.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_queries_cache_size": { - Description: "Percentage value. Default is 10%. 
Maximum amount of heap used for query cache. This is an expert setting. Too low value will decrease query performance and increase performance for other operations; too high value will cause issues with other OpenSearch functionality.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_query_bool_max_clause_count": { - Description: "Maximum number of clauses Lucene BooleanQuery can have. The default value (1024) is relatively high, and increasing it may cause performance issues. Investigate other approaches first before increasing this value.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_bytes_per_sec": { - Description: "Limits total inbound and outbound recovery traffic for each node. Applies to both peer recoveries as well as snapshot recoveries (i.e., restores from a snapshot). Defaults to 40mb.", - Optional: true, - Type: schema.TypeInt, - }, - "indices_recovery_max_concurrent_file_chunks": { - Description: "Number of file chunks sent in parallel for each recovery. Defaults to 2.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_enabled": { - Default: true, - Description: "Specifies whether ISM is enabled or not. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "ism_history_enabled": { - Default: true, - Description: "Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "ism_history_max_age": { - Default: "24", - Description: "The maximum age before rolling over the audit history index in hours. The default value is `24`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_max_docs": { - Default: "2500000", - Description: "The maximum number of documents before rolling over the audit history index. The default value is `2500000`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_rollover_check_period": { - Default: "8", - Description: "The time between rollover checks for the audit history index in hours. The default value is `8`.", - Optional: true, - Type: schema.TypeInt, - }, - "ism_history_rollover_retention_period": { - Default: "30", - Description: "How long audit history indices are kept in days. The default value is `30`.", - Optional: true, - Type: schema.TypeInt, - }, - "override_main_response_version": { - Description: "Compatibility mode sets OpenSearch to report its version as 7.10 so clients continue to work. Default is false.", - Optional: true, - Type: schema.TypeBool, - }, - "plugins_alerting_filter_by_backend_roles": { - Description: "Enable or disable filtering of alerting by backend roles. Requires Security plugin. Defaults to false.", - Optional: true, - Type: schema.TypeBool, - }, - "reindex_remote_whitelist": { - Description: "Whitelisted addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "script_max_compilations_rate": { - Description: "Script compilation circuit breaker limits the number of inline script compilations within a period of time. Default is use-context.", - Optional: true, - Type: schema.TypeString, - }, - "search_max_buckets": { - Description: "Maximum number of aggregation buckets allowed in a single response. 
OpenSearch default value is used when this is not defined.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_analyze_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_force_merge_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_get_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_search_throttled_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_queue_size": { - Description: "Size for the thread pool queue. See documentation for exact details.", - Optional: true, - Type: schema.TypeInt, - }, - "thread_pool_write_size": { - Description: "Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "opensearch_dashboards": { - Description: "OpenSearch Dashboards settings.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "enabled": { - Default: true, - Description: "Enable or disable OpenSearch Dashboards. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "max_old_space_size": { - Default: "128", - Description: "Limits the maximum amount of memory (in MiB) the OpenSearch Dashboards process can use. This sets the max_old_space_size option of the nodejs running the OpenSearch Dashboards. Note: the memory reserved by OpenSearch Dashboards is not available for OpenSearch. 
The default value is `128`.", - Optional: true, - Type: schema.TypeInt, - }, - "opensearch_request_timeout": { - Default: "30000", - Description: "Timeout in milliseconds for requests made by OpenSearch Dashboards towards OpenSearch. The default value is `30000`.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "enabled": { - Default: true, - Description: "Enable or disable OpenSearch Dashboards. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "max_old_space_size": { - Default: "128", - Description: "Limits the maximum amount of memory (in MiB) the OpenSearch Dashboards process can use. This sets the max_old_space_size option of the nodejs running the OpenSearch Dashboards. Note: the memory reserved by OpenSearch Dashboards is not available for OpenSearch. The default value is `128`.", - Optional: true, - Type: schema.TypeInt, - }, - "opensearch_request_timeout": { - Default: "30000", - Description: "Timeout in milliseconds for requests made by OpenSearch Dashboards towards OpenSearch. The default value is `30000`.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "opensearch_version": { - Description: "OpenSearch major version.", - Optional: true, - Type: schema.TypeString, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "opensearch": { - Description: "Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "opensearch": { - Description: "Allow clients to connect to opensearch with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Allow clients to connect to opensearch_dashboards with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "opensearch": { - Description: "Enable opensearch.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Enable opensearch_dashboards.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "opensearch": { - Description: "Enable opensearch.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Enable opensearch_dashboards.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "opensearch": { - Description: "Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "opensearch": { - Description: "Allow clients to connect to opensearch from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "opensearch_dashboards": { - Description: "Allow clients to connect to opensearch_dashboards from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "saml": { - Description: "OpenSearch SAML configuration.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "enabled": { - Description: "Enables or disables SAML-based authentication for OpenSearch. 
When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.", - Required: true, - Type: schema.TypeBool, - }, - "idp_entity_id": { - Description: "The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.", - Required: true, - Type: schema.TypeString, - }, - "idp_metadata_url": { - Description: "The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP.", - Required: true, - Type: schema.TypeString, - }, - "idp_pemtrustedcas_content": { - Description: "This parameter specifies the PEM-encoded root certificate authority (CA) content for the SAML identity provider (IdP) server verification. The root CA content is used to verify the SSL/TLS certificate presented by the server.", - Optional: true, - Type: schema.TypeString, - }, - "roles_key": { - Description: "Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.", - Optional: true, - Type: schema.TypeString, - }, - "sp_entity_id": { - Description: "The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.", - Required: true, - Type: schema.TypeString, - }, - "subject_key": { - Description: "Optional. Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "enabled": { - Description: "Enables or disables SAML-based authentication for OpenSearch. When enabled, users can authenticate using SAML with an Identity Provider. The default value is `true`.", - Required: true, - Type: schema.TypeBool, - }, - "idp_entity_id": { - Description: "The unique identifier for the Identity Provider (IdP) entity that is used for SAML authentication. This value is typically provided by the IdP.", - Required: true, - Type: schema.TypeString, - }, - "idp_metadata_url": { - Description: "The URL of the SAML metadata for the Identity Provider (IdP). This is used to configure SAML-based authentication with the IdP.", - Required: true, - Type: schema.TypeString, - }, - "idp_pemtrustedcas_content": { - Description: "This parameter specifies the PEM-encoded root certificate authority (CA) content for the SAML identity provider (IdP) server verification. The root CA content is used to verify the SSL/TLS certificate presented by the server.", - Optional: true, - Type: schema.TypeString, - }, - "roles_key": { - Description: "Optional. Specifies the attribute in the SAML response where role information is stored, if available. Role attributes are not required for SAML authentication, but can be included in SAML assertions by most Identity Providers (IdPs) to determine user access levels or permissions.", - Optional: true, - Type: schema.TypeString, - }, - "sp_entity_id": { - Description: "The unique identifier for the Service Provider (SP) entity that is used for SAML authentication. This value is typically provided by the SP.", - Required: true, - Type: schema.TypeString, - }, - "subject_key": { - Description: "Optional. 
Specifies the attribute in the SAML response where the subject identifier is stored. If not configured, the NameID attribute is used by default.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Opensearch user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypePg is a generated function returning the schema of the pg ServiceType. -func ServiceTypePg() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "admin_password": { - Description: "Custom password for admin user. Defaults to random string. This must be set only when a new service is being created.", - ForceNew: true, - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "admin_username": { - Description: "Custom username for admin user. This must be set only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "backup_hour": { - Description: "The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "backup_minute": { - Description: "The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.", - Optional: true, - Type: schema.TypeInt, - }, - "enable_ipv6": { - Description: "Register AAAA DNS records for the service, and allow IPv6 packets to service ports.", - Optional: true, - Type: schema.TypeBool, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "migration": { - Description: "Migrate data from existing server.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "pg": { - Description: "postgresql.conf configuration values.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "autovacuum_analyze_scale_factor": { - Description: "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).", - Optional: true, - Type: schema.TypeFloat, - }, - "autovacuum_analyze_threshold": { - Description: "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_freeze_max_age": { - Description: "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_max_workers": { - Description: "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_naptime": { - Description: "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_cost_delay": { - Description: "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_cost_limit": { - Description: "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_scale_factor": { - Description: "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).", - Optional: true, - Type: schema.TypeFloat, - }, - "autovacuum_vacuum_threshold": { - Description: "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_delay": { - Description: "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_flush_after": { - Description: "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. 
Setting of 0 disables forced writeback.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_lru_maxpages": { - Description: "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_lru_multiplier": { - Description: "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", - Optional: true, - Type: schema.TypeFloat, - }, - "deadlock_timeout": { - Description: "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", - Optional: true, - Type: schema.TypeInt, - }, - "default_toast_compression": { - Description: "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", - Optional: true, - Type: schema.TypeString, - }, - "idle_in_transaction_session_timeout": { - Description: "Time out sessions with open transactions after this number of milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "jit": { - Description: "Controls system-wide use of Just-in-Time Compilation (JIT).", - Optional: true, - Type: schema.TypeBool, - }, - "log_autovacuum_min_duration": { - Description: "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.", - Optional: true, - Type: schema.TypeInt, - }, - "log_error_verbosity": { - Description: "Controls the amount of detail written in the server log for each message that is logged.", - Optional: true, - Type: schema.TypeString, - }, - "log_line_prefix": { - Description: "Choose from one of the available log-formats. 
These can support popular log analyzers like pgbadger, pganalyze etc.", - Optional: true, - Type: schema.TypeString, - }, - "log_min_duration_statement": { - Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", - Optional: true, - Type: schema.TypeInt, - }, - "log_temp_files": { - Description: "Log statements for each temporary file created larger than this number of kilobytes, -1 disables.", - Optional: true, - Type: schema.TypeInt, - }, - "max_files_per_process": { - Description: "PostgreSQL maximum number of files that can be open per process.", - Optional: true, - Type: schema.TypeInt, - }, - "max_locks_per_transaction": { - Description: "PostgreSQL maximum locks per transaction.", - Optional: true, - Type: schema.TypeInt, - }, - "max_logical_replication_workers": { - Description: "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers).", - Optional: true, - Type: schema.TypeInt, - }, - "max_parallel_workers": { - Description: "Sets the maximum number of workers that the system can support for parallel queries.", - Optional: true, - Type: schema.TypeInt, - }, - "max_parallel_workers_per_gather": { - Description: "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node.", - Optional: true, - Type: schema.TypeInt, - }, - "max_pred_locks_per_transaction": { - Description: "PostgreSQL maximum predicate locks per transaction.", - Optional: true, - Type: schema.TypeInt, - }, - "max_prepared_transactions": { - Description: "PostgreSQL maximum prepared transactions.", - Optional: true, - Type: schema.TypeInt, - }, - "max_replication_slots": { - Description: "PostgreSQL maximum replication slots.", - Optional: true, - Type: schema.TypeInt, - }, - "max_slot_wal_keep_size": { - Description: "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.", - Optional: true, - Type: schema.TypeInt, - }, - "max_stack_depth": { - Description: "Maximum depth of the stack in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "max_standby_archive_delay": { - Description: "Max standby archive delay in milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "max_standby_streaming_delay": { - Description: "Max standby streaming delay in milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "max_wal_senders": { - Description: "PostgreSQL maximum WAL senders.", - Optional: true, - Type: schema.TypeInt, - }, - "max_worker_processes": { - Description: "Sets the maximum number of background processes that the system can support.", - Optional: true, - Type: schema.TypeInt, - }, - "pg_partman_bgw__dot__interval": { - Description: "Sets the time interval to run pg_partman's scheduled tasks.", - Optional: true, - Type: schema.TypeInt, - }, - "pg_partman_bgw__dot__role": { - Description: "Controls which role to use for pg_partman's scheduled background tasks.", - Optional: true, - Type: schema.TypeString, - }, - "pg_stat_monitor__dot__pgsm_enable_query_plan": { - Description: "Enables or disables query plan monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pg_stat_monitor__dot__pgsm_max_buckets": { - Description: "Sets the maximum number of buckets .", - Optional: true, - Type: schema.TypeInt, - }, - "pg_stat_statements__dot__track": { - Description: "Controls which statements are counted. 
Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", - Optional: true, - Type: schema.TypeString, - }, - "temp_file_limit": { - Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited.", - Optional: true, - Type: schema.TypeInt, - }, - "timezone": { - Description: "PostgreSQL service timezone.", - Optional: true, - Type: schema.TypeString, - }, - "track_activity_query_size": { - Description: "Specifies the number of bytes reserved to track the currently executing command for each active session.", - Optional: true, - Type: schema.TypeInt, - }, - "track_commit_timestamp": { - Description: "Record commit time of transactions.", - Optional: true, - Type: schema.TypeString, - }, - "track_functions": { - Description: "Enables tracking of function call counts and time used.", - Optional: true, - Type: schema.TypeString, - }, - "track_io_timing": { - Description: "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", - Optional: true, - Type: schema.TypeString, - }, - "wal_sender_timeout": { - Description: "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", - Optional: true, - Type: schema.TypeInt, - }, - "wal_writer_delay": { - Description: "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.", - Optional: true, - Type: schema.TypeInt, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "autovacuum_analyze_scale_factor": { - Description: "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size).", - Optional: true, - Type: schema.TypeFloat, - }, - "autovacuum_analyze_threshold": { - Description: "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_freeze_max_age": { - Description: "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_max_workers": { - Description: "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_naptime": { - Description: "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_cost_delay": { - Description: "Specifies the cost delay value that will be used in automatic VACUUM operations. 
If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_cost_limit": { - Description: "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", - Optional: true, - Type: schema.TypeInt, - }, - "autovacuum_vacuum_scale_factor": { - Description: "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size).", - Optional: true, - Type: schema.TypeFloat, - }, - "autovacuum_vacuum_threshold": { - Description: "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_delay": { - Description: "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_flush_after": { - Description: "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_lru_maxpages": { - Description: "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", - Optional: true, - Type: schema.TypeInt, - }, - "bgwriter_lru_multiplier": { - Description: "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", - Optional: true, - Type: schema.TypeFloat, - }, - "deadlock_timeout": { - Description: "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", - Optional: true, - Type: schema.TypeInt, - }, - "default_toast_compression": { - Description: "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", - Optional: true, - Type: schema.TypeString, - }, - "idle_in_transaction_session_timeout": { - Description: "Time out sessions with open transactions after this number of milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "jit": { - Description: "Controls system-wide use of Just-in-Time Compilation (JIT).", - Optional: true, - Type: schema.TypeBool, - }, - "log_autovacuum_min_duration": { - Description: "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. 
Minus-one (the default) disables logging autovacuum actions.", - Optional: true, - Type: schema.TypeInt, - }, - "log_error_verbosity": { - Description: "Controls the amount of detail written in the server log for each message that is logged.", - Optional: true, - Type: schema.TypeString, - }, - "log_line_prefix": { - Description: "Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.", - Optional: true, - Type: schema.TypeString, - }, - "log_min_duration_statement": { - Description: "Log statements that take more than this number of milliseconds to run, -1 disables.", - Optional: true, - Type: schema.TypeInt, - }, - "log_temp_files": { - Description: "Log statements for each temporary file created larger than this number of kilobytes, -1 disables.", - Optional: true, - Type: schema.TypeInt, - }, - "max_files_per_process": { - Description: "PostgreSQL maximum number of files that can be open per process.", - Optional: true, - Type: schema.TypeInt, - }, - "max_locks_per_transaction": { - Description: "PostgreSQL maximum locks per transaction.", - Optional: true, - Type: schema.TypeInt, - }, - "max_logical_replication_workers": { - Description: "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers).", - Optional: true, - Type: schema.TypeInt, - }, - "max_parallel_workers": { - Description: "Sets the maximum number of workers that the system can support for parallel queries.", - Optional: true, - Type: schema.TypeInt, - }, - "max_parallel_workers_per_gather": { - Description: "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node.", - Optional: true, - Type: schema.TypeInt, - }, - "max_pred_locks_per_transaction": { - Description: "PostgreSQL maximum predicate locks per transaction.", - Optional: true, - Type: schema.TypeInt, - }, - "max_prepared_transactions": { - Description: "PostgreSQL maximum prepared transactions.", - Optional: true, - Type: schema.TypeInt, - }, - "max_replication_slots": { - Description: "PostgreSQL maximum replication slots.", - Optional: true, - Type: schema.TypeInt, - }, - "max_slot_wal_keep_size": { - Description: "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", - Optional: true, - Type: schema.TypeInt, - }, - "max_stack_depth": { - Description: "Maximum depth of the stack in bytes.", - Optional: true, - Type: schema.TypeInt, - }, - "max_standby_archive_delay": { - Description: "Max standby archive delay in milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "max_standby_streaming_delay": { - Description: "Max standby streaming delay in milliseconds.", - Optional: true, - Type: schema.TypeInt, - }, - "max_wal_senders": { - Description: "PostgreSQL maximum WAL senders.", - Optional: true, - Type: schema.TypeInt, - }, - "max_worker_processes": { - Description: "Sets the maximum number of background processes that the system can support.", - Optional: true, - Type: schema.TypeInt, - }, - "pg_partman_bgw__dot__interval": { - Description: "Sets the time interval to run pg_partman's scheduled tasks.", - Optional: true, - Type: schema.TypeInt, - }, - "pg_partman_bgw__dot__role": { - Description: "Controls which role to use for pg_partman's scheduled background tasks.", - Optional: true, - Type: schema.TypeString, - }, - "pg_stat_monitor__dot__pgsm_enable_query_plan": { - Description: "Enables or disables query plan monitoring.", - Optional: true, - Type: schema.TypeBool, - }, - "pg_stat_monitor__dot__pgsm_max_buckets": { - Description: "Sets the maximum number of buckets .", - Optional: true, - Type: schema.TypeInt, - }, - "pg_stat_statements__dot__track": { - Description: "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", - Optional: true, - Type: schema.TypeString, - }, - "temp_file_limit": { - Description: "PostgreSQL temporary file limit in KiB, -1 for unlimited.", - Optional: true, - Type: schema.TypeInt, - }, - "timezone": { - Description: "PostgreSQL service timezone.", - Optional: true, - Type: schema.TypeString, - }, - "track_activity_query_size": { - Description: "Specifies the number of bytes reserved to track the currently executing command for each active session.", - Optional: true, - Type: schema.TypeInt, - }, - "track_commit_timestamp": { - Description: "Record commit time of transactions.", - Optional: true, - Type: schema.TypeString, - }, - "track_functions": { - Description: "Enables tracking of function call counts and time used.", - Optional: true, - Type: schema.TypeString, - }, - "track_io_timing": { - Description: "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", - Optional: true, - Type: schema.TypeString, - }, - "wal_sender_timeout": { - Description: "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", - Optional: true, - Type: schema.TypeInt, - }, - "wal_writer_delay": { - Description: "WAL flush interval in milliseconds. 
Note that setting this value to lower than the default 200ms may negatively impact performance.", - Optional: true, - Type: schema.TypeInt, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "pg_qualstats": { - Description: "System-wide settings for the pg_qualstats extension.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "enabled": { - Default: false, - Description: "Enable / Disable pg_qualstats. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "min_err_estimate_num": { - Default: "0", - Description: "Error estimation num threshold to save quals. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "min_err_estimate_ratio": { - Default: "0", - Description: "Error estimation ratio threshold to save quals. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "track_constants": { - Default: true, - Description: "Enable / Disable pg_qualstats constants tracking. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "track_pg_catalog": { - Default: false, - Description: "Track quals on system catalogs too. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "enabled": { - Default: false, - Description: "Enable / Disable pg_qualstats. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "min_err_estimate_num": { - Default: "0", - Description: "Error estimation num threshold to save quals. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "min_err_estimate_ratio": { - Default: "0", - Description: "Error estimation ratio threshold to save quals. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "track_constants": { - Default: true, - Description: "Enable / Disable pg_qualstats constants tracking. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "track_pg_catalog": { - Default: false, - Description: "Track quals on system catalogs too. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "pg_read_replica": { - Deprecated: "Usage of this field is discouraged.", - Description: "Use read_replica service integration instead.", - Optional: true, - Type: schema.TypeBool, - }, - "pg_service_to_fork_from": { - Deprecated: "Usage of this field is discouraged.", - Description: "Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "pg_stat_monitor_enable": { - Default: false, - Description: "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "pg_version": { - Description: "PostgreSQL major version.", - Optional: true, - Type: schema.TypeString, - }, - "pgaudit": { - Description: "System-wide settings for the pgaudit extension.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "feature_enabled": { - Default: false, - Description: "Enable pgaudit extension. 
When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log": { - Description: "Specifies which classes of statements will be logged by session audit logging.", - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Type: schema.TypeList, - }, - "log_catalog": { - Default: true, - Description: "Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_client": { - Default: false, - Description: "Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_level": { - Default: "log", - Description: "Specifies the log level that will be used for log entries. The default value is `log`.", - Optional: true, - Type: schema.TypeString, - }, - "log_max_string_length": { - Default: "-1", - Description: "Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.", - Optional: true, - Type: schema.TypeInt, - }, - "log_nested_statements": { - Default: true, - Description: "This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_parameter": { - Default: false, - Description: "Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_parameter_max_size": { - Default: "0", - Description: "Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with . The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "log_relation": { - Default: false, - Description: "Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_rows": { - Default: false, - Description: "Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_statement": { - Default: true, - Description: "Specifies whether logging will include the statement text and parameters (if enabled). The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_statement_once": { - Default: false, - Description: "Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "role": { - Description: "Specifies the master role to use for object audit logging.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "feature_enabled": { - Default: false, - Description: "Enable pgaudit extension. 
When enabled, pgaudit extension will be automatically installed.Otherwise, extension will be uninstalled but auditing configurations will be preserved. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log": { - Description: "Specifies which classes of statements will be logged by session audit logging.", - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Type: schema.TypeList, - }, - "log_catalog": { - Default: true, - Description: "Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_client": { - Default: false, - Description: "Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_level": { - Default: "log", - Description: "Specifies the log level that will be used for log entries. The default value is `log`.", - Optional: true, - Type: schema.TypeString, - }, - "log_max_string_length": { - Default: "-1", - Description: "Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.", - Optional: true, - Type: schema.TypeInt, - }, - "log_nested_statements": { - Default: true, - Description: "This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_parameter": { - Default: false, - Description: "Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_parameter_max_size": { - Default: "0", - Description: "Specifies that parameter values longer than this setting (in bytes) should not be logged, but replaced with . The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "log_relation": { - Default: false, - Description: "Specifies whether session audit logging should create a separate log entry for each relation (TABLE, VIEW, etc.) referenced in a SELECT or DML statement. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_rows": { - Default: false, - Description: "Specifies that audit logging should include the rows retrieved or affected by a statement. When enabled the rows field will be included after the parameter field. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_statement": { - Default: true, - Description: "Specifies whether logging will include the statement text and parameters (if enabled). The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "log_statement_once": { - Default: false, - Description: "Specifies whether logging will include the statement text and parameters with the first log entry for a statement/substatement combination or with every entry. 
The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - "role": { - Description: "Specifies the master role to use for object audit logging.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "pgbouncer": { - Description: "System-wide settings for pgbouncer.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "autodb_idle_timeout": { - Default: "3600", - Description: "If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.", - Optional: true, - Type: schema.TypeInt, - }, - "autodb_max_db_connections": { - Description: "Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.", - Optional: true, - Type: schema.TypeInt, - }, - "autodb_pool_mode": { - Default: "transaction", - Description: "PGBouncer pool mode. The default value is `transaction`.", - Optional: true, - Type: schema.TypeString, - }, - "autodb_pool_size": { - Default: "0", - Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "ignore_startup_parameters": { - Description: "List of parameters to ignore when given in startup packet.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "min_pool_size": { - Default: "0", - Description: "Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_idle_timeout": { - Default: "600", - Description: "If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). The default value is `600`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_lifetime": { - Default: "3600", - Description: "The pooler will close an unused server connection that has been connected longer than this. (seconds). The default value is `3600`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_reset_query_always": { - Default: false, - Description: "Run server_reset_query (DISCARD ALL) in all pooling modes. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "autodb_idle_timeout": { - Default: "3600", - Description: "If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.", - Optional: true, - Type: schema.TypeInt, - }, - "autodb_max_db_connections": { - Description: "Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.", - Optional: true, - Type: schema.TypeInt, - }, - "autodb_pool_mode": { - Default: "transaction", - Description: "PGBouncer pool mode. The default value is `transaction`.", - Optional: true, - Type: schema.TypeString, - }, - "autodb_pool_size": { - Default: "0", - Description: "If non-zero then create automatically a pool of that size per user when a pool doesn't exist. 
The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "ignore_startup_parameters": { - Description: "List of parameters to ignore when given in startup packet.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 32, - Optional: true, - Type: schema.TypeList, - }, - "min_pool_size": { - Default: "0", - Description: "Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_idle_timeout": { - Default: "600", - Description: "If a server connection has been idle more than this many seconds it will be dropped. If 0 then timeout is disabled. (seconds). The default value is `600`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_lifetime": { - Default: "3600", - Description: "The pooler will close an unused server connection that has been connected longer than this. (seconds). The default value is `3600`.", - Optional: true, - Type: schema.TypeInt, - }, - "server_reset_query_always": { - Default: false, - Description: "Run server_reset_query (DISCARD ALL) in all pooling modes. The default value is `false`.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "pglookout": { - Description: "System-wide settings for pglookout.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"max_failover_replication_time_lag": { - Default: "60", - Description: "Number of seconds of master unavailability before triggering database failover to standby. The default value is `60`.", - Optional: true, - Type: schema.TypeInt, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_failover_replication_time_lag": { - Default: "60", - Description: "Number of seconds of master unavailability before triggering database failover to standby. The default value is `60`.", - Optional: true, - Type: schema.TypeInt, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "pg": { - Description: "Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "pg": { - Description: "Allow clients to connect to pg with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Allow clients to connect to pgbouncer with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "pg": { - Description: "Enable pg.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Enable pgbouncer.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "pg": { - Description: "Enable pg.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Enable pgbouncer.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "pg": { - Description: "Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "pg": { - Description: "Allow clients to connect to pg from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "pgbouncer": { - Description: "Allow clients to connect to pgbouncer from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_target_time": { - Description: "Recovery target time when forking a service. 
This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "shared_buffers_percentage": { - Description: "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", - Optional: true, - Type: schema.TypeFloat, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - "synchronous_replication": { - Description: "Synchronous replication type. Note that the service plan also needs to support synchronous replication.", - Optional: true, - Type: schema.TypeString, - }, - "timescaledb": { - Description: "System-wide settings for the timescaledb extension.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{"max_background_workers": { - Default: "16", - Description: "The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. The default value is `16`.", - Optional: true, - Type: schema.TypeInt, - }}), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{"max_background_workers": { - Default: "16", - Description: "The number of background workers for timescaledb operations. You should configure this setting to the sum of your number of databases and the total number of concurrent background workers you want running at any given point in time. The default value is `16`.", - Optional: true, - Type: schema.TypeInt, - }}}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "variant": { - Description: "Variant of the PostgreSQL service, may affect the features that are exposed by default.", - Optional: true, - Type: schema.TypeString, - }, - "work_mem": { - Description: "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).", - Optional: true, - Type: schema.TypeInt, - }, - } - - return &schema.Schema{ - Description: "Pg user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} - -// ServiceTypeRedis is a generated function returning the schema of the redis ServiceType. -func ServiceTypeRedis() *schema.Schema { - s := map[string]*schema.Schema{ - "additional_backup_regions": { - Description: "Additional Cloud Regions for Backup Replication.", - Elem: &schema.Schema{Type: schema.TypeString}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter": { - Deprecated: "This will be removed in v5.0.0 and replaced with ip_filter_string instead.", - Description: "Allow incoming connections from CIDR address block, e.g. 
'10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_object": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "description": { - Description: "Description for IP filter list entry.", - Optional: true, - Type: schema.TypeString, - }, - "network": { - Description: "CIDR address block.", - Required: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "ip_filter_string": { - Description: "Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.", - DiffSuppressFunc: schemautil.IPFilterArrayDiffSuppressFunc, - Elem: &schema.Schema{ - DiffSuppressFunc: schemautil.IPFilterValueDiffSuppressFunc, - Type: schema.TypeString, - }, - MaxItems: 1024, - Optional: true, - Type: schema.TypeList, - }, - "migration": { - Description: "Migrate data from existing server.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. 
The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "dbname": { - Description: "Database name for bootstrapping the initial connection.", - Optional: true, - Type: schema.TypeString, - }, - "host": { - Description: "Hostname or IP address of the server where to migrate data from.", - Required: true, - Type: schema.TypeString, - }, - "ignore_dbs": { - Description: "Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).", - Optional: true, - Type: schema.TypeString, - }, - "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", - Optional: true, - Type: schema.TypeString, - }, - "password": { - Description: "Password for authentication with the server where to migrate data from.", - Optional: true, - Sensitive: true, - Type: schema.TypeString, - }, - "port": { - Description: "Port number of the server where to migrate data from.", - Required: true, - Type: schema.TypeInt, - }, - "ssl": { - Default: true, - Description: "The server where to migrate data from is secured with SSL. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "username": { - Description: "User name for authentication with the server where to migrate data from.", - Optional: true, - Type: schema.TypeString, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "private_access": { - Description: "Allow access to selected service ports from private networks.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Allow clients to connect to redis with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "prometheus": { - Description: "Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Allow clients to connect to redis with a DNS name that always resolves to the service's private IP addresses. 
Only available in certain network locations.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "privatelink_access": { - Description: "Allow access to selected service components through Privatelink.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Enable redis.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "prometheus": { - Description: "Enable prometheus.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Enable redis.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "project_to_fork_from": { - Description: "Name of another project to fork a service from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "public_access": { - Description: "Allow access to selected service ports from the public Internet.", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(map[string]*schema.Schema{ - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Allow clients to connect to redis from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }), - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "prometheus": { - Description: "Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - "redis": { - Description: "Allow clients to connect to redis from the public internet for service nodes that are in a project VPC or another type of private network.", - Optional: true, - Type: schema.TypeBool, - }, - }}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - }, - "recovery_basebackup_name": { - Description: "Name of the basebackup to restore in forked service.", - Optional: true, - Type: schema.TypeString, - }, - "redis_acl_channels_default": { - Description: "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", - Optional: true, - Type: schema.TypeString, - }, - "redis_io_threads": { - Description: "Set Redis IO thread count. Changing this will cause a restart of the Redis service.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_lfu_decay_time": { - Default: "1", - Description: "LFU maxmemory-policy counter decay time in minutes. The default value is `1`.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_lfu_log_factor": { - Default: "10", - Description: "Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_maxmemory_policy": { - Default: "noeviction", - Description: "Redis maxmemory-policy. 
The default value is `noeviction`.", - Optional: true, - Type: schema.TypeString, - }, - "redis_notify_keyspace_events": { - Default: "", - Description: "Set notify-keyspace-events option.", - Optional: true, - Type: schema.TypeString, - }, - "redis_number_of_databases": { - Description: "Set number of Redis databases. Changing this will cause a restart of the Redis service.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_persistence": { - Description: "When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.", - Optional: true, - Type: schema.TypeString, - }, - "redis_pubsub_client_output_buffer_limit": { - Description: "Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_ssl": { - Default: true, - Description: "Require SSL to access Redis. The default value is `true`.", - Optional: true, - Type: schema.TypeBool, - }, - "redis_timeout": { - Default: "300", - Description: "Redis idle connection timeout in seconds. The default value is `300`.", - Optional: true, - Type: schema.TypeInt, - }, - "redis_version": { - Description: "Redis major version.", - Optional: true, - Type: schema.TypeString, - }, - "service_log": { - Description: "Store logs for the service so that they are available in the HTTP API and console.", - Optional: true, - Type: schema.TypeBool, - }, - "service_to_fork_from": { - Description: "Name of another service to fork from. This has effect only when a new service is being created.", - ForceNew: true, - Optional: true, - Type: schema.TypeString, - }, - "static_ips": { - Description: "Use static public IP addresses.", - Optional: true, - Type: schema.TypeBool, - }, - } - - return &schema.Schema{ - Description: "Redis user configurable settings", - DiffSuppressFunc: schemautil.EmptyObjectDiffSuppressFuncSkipArrays(s), - Elem: &schema.Resource{Schema: s}, - MaxItems: 1, - Optional: true, - Type: schema.TypeList, - } -} diff --git a/internal/schemautil/userconfig/handle.go b/internal/schemautil/userconfig/handle.go deleted file mode 100644 index 90ab028b7..000000000 --- a/internal/schemautil/userconfig/handle.go +++ /dev/null @@ -1,293 +0,0 @@ -//nolint:unused -package userconfig - -import ( - "fmt" - - "github.com/dave/jennifer/jen" - "golang.org/x/exp/maps" -) - -// handlePrimitiveTypeProperty is a function that converts a primitive type property to a Terraform schema. -func handlePrimitiveTypeProperty( - name string, - property map[string]any, - typeStr string, - isRequired bool, -) map[string]*jen.Statement { - return map[string]*jen.Statement{ - name: jen.Values(convertPropertyToSchema(name, property, typeStr, true, isRequired)), - } -} - -// handleObjectProperty converts an object type property to a Terraform schema. 
-func handleObjectProperty( - objectName string, - propertyMap map[string]any, - typeString string, - requiredProperties map[string]struct{}, -) (map[string]*jen.Statement, error) { - properties, propertiesExist := propertyMap["properties"].(map[string]any) - if !propertiesExist { - itemsAt, itemsExist := propertyMap["items"].(map[string]any) - if itemsExist { - properties, propertiesExist = itemsAt["properties"].(map[string]any) - } - - if !propertiesExist { - return nil, fmt.Errorf("unable to get properties field: %#v", propertyMap) - } - } - - resourceStatements := convertPropertyToSchema( - objectName, propertyMap, typeString, true, false, - ) - - schemaMapAt, err := convertPropertiesToSchemaMap(properties, requiredProperties) - if err != nil { - return nil, err - } - - schemaValues := jen.Map(jen.String()).Op("*").Qual(SchemaPackage, "Schema").Values(schemaMapAt) - - resourceStatements[jen.Id("Elem")] = jen.Op("&").Qual(SchemaPackage, "Resource").Values(jen.Dict{ - jen.Id("Schema"): schemaValues, - }) - - // TODO: Check if we can access the schema via diff suppression function. - resourceStatements[jen.Id("DiffSuppressFunc")] = jen.Qual( - SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays", - ).Call(schemaValues) - - resourceStatements[jen.Id("MaxItems")] = jen.Lit(1) - - return map[string]*jen.Statement{objectName: jen.Values(resourceStatements)}, nil -} - -// handleArrayOfPrimitiveTypeProperty is a function that converts an array of primitive type property to a Terraform -// schema. -func handleArrayOfPrimitiveTypeProperty(propertyName string, terraformType string) *jen.Statement { - propertyAttributes := jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, terraformType), - } - - if propertyName == "ip_filter" { - // TODO: Add ip_filter_object to this sanity check when DiffSuppressFunc is implemented for it. - propertyAttributes[jen.Id("DiffSuppressFunc")] = jen.Qual( - SchemaUtilPackage, "IPFilterValueDiffSuppressFunc", - ) - } - - return jen.Op("&").Qual(SchemaPackage, "Schema").Values(propertyAttributes) -} - -// handleArrayOfAggregateTypeProperty is a function that converts an array of aggregate type property to a Terraform -// schema. -func handleArrayOfAggregateTypeProperty(ip map[string]any, req map[string]struct{}) (*jen.Statement, error) { - pc, err := convertPropertiesToSchemaMap(ip, req) - if err != nil { - return nil, err - } - - return jen.Op("&").Qual(SchemaPackage, "Resource").Values(jen.Dict{ - jen.Id("Schema"): jen.Map(jen.String()).Op("*").Qual(SchemaPackage, "Schema").Values(pc), - }), nil -} - -// handleArrayProperty is a function that converts an array type property to a Terraform schema. 
-func handleArrayProperty( - propertyName string, - propertyMap map[string]any, - terraformType string, -) (map[string]*jen.Statement, error) { - itemAttributes, ok := propertyMap["items"].(map[string]any) - if !ok { - return nil, fmt.Errorf("items is not a map[string]any: %#v", propertyMap) - } - - var element *jen.Statement - - var terraformNames, aivenTypeNames []string - - var err error - - oneOfOptions, isOneOf := itemAttributes["one_of"].([]any) - if isOneOf { - var complexTypes []string - - for _, v := range oneOfOptions { - oneOfMap, ok := v.(map[string]any) - if !ok { - return nil, fmt.Errorf("one_of element is not a map[string]any: %#v", v) - } - - complexTypes = append(complexTypes, oneOfMap["type"].(string)) - } - - terraformNames, aivenTypeNames, err = TerraformTypes(complexTypes) - if err != nil { - return nil, err - } - } else { - terraformNames, aivenTypeNames, err = TerraformTypes(SlicedString(itemAttributes["type"])) - if err != nil { - return nil, err - } - } - - result := make(map[string]*jen.Statement) - - for k, terraformName := range terraformNames { - adjustedName := propertyName - - if len(terraformNames) > 1 { - adjustedName = fmt.Sprintf("%s_%s", propertyName, aivenTypeNames[k]) - - // TODO: Remove with the next major version. - if adjustedName == "ip_filter_string" { - adjustedName = "ip_filter" - } - - // TODO: Remove with the next major version. - if adjustedName == "namespaces_string" { - adjustedName = "namespaces" - } - } - - var oneOfItemAttributes map[string]any - - if isOneOf { - oneOfItemAttributes, ok = oneOfOptions[k].(map[string]any) - if !ok { - return nil, - fmt.Errorf("unable to convert one_of item to map[string]any: %#v", oneOfOptions[k]) - } - } - - if isTerraformTypePrimitive(terraformName) { - element = handleArrayOfPrimitiveTypeProperty(adjustedName, terraformName) - } else { - var itemProperties map[string]any - - if isOneOf { - itemProperties, ok = oneOfItemAttributes["properties"].(map[string]any) - if !ok { - return nil, fmt.Errorf( - "unable to convert one_of item properties to map[string]any: %#v", - oneOfItemAttributes, - ) - } - } else { - itemProperties, ok = itemAttributes["properties"].(map[string]any) - if !ok { - return nil, - fmt.Errorf("could not find properties in an array of aggregate type: %#v", propertyMap) - } - } - - requiredProperties := map[string]struct{}{} - - if requiredItems, ok := itemAttributes["required"].([]any); ok { - requiredProperties = SliceToKeyedMap(requiredItems) - } - - element, err = handleArrayOfAggregateTypeProperty(itemProperties, requiredProperties) - if err != nil { - return nil, err - } - } - - schema := convertPropertyToSchema(propertyName, propertyMap, terraformType, !isOneOf, false) - - if isOneOf { - oneOfType, ok := oneOfItemAttributes["type"].(string) - if !ok { - return nil, fmt.Errorf("one_of item type is not a string: %#v", oneOfItemAttributes) - } - - _, defaultPropertyDescription := descriptionForProperty(propertyMap, terraformType) - - deprecationIndicator, oneOfItemDescription := descriptionForProperty(oneOfItemAttributes, oneOfType) - - schema[jen.Id("Description")] = jen.Lit( - fmt.Sprintf("%s %s", defaultPropertyDescription, oneOfItemDescription), - ) - - if deprecationIndicator { - schema[jen.Id("Deprecated")] = jen.Lit("Usage of this field is discouraged.") - } - } - - schema[jen.Id("Elem")] = element - - if adjustedName == "ip_filter" { - // TODO: Add ip_filter_object to this sanity check when DiffSuppressFunc is implemented for it. 
- schema[jen.Id("DiffSuppressFunc")] = jen.Qual( - SchemaUtilPackage, "IPFilterArrayDiffSuppressFunc", - ) - } - - if maxItems, ok := propertyMap["max_items"].(int); ok { - schema[jen.Id("MaxItems")] = jen.Lit(maxItems) - } - - orderedSchema := jen.Dict{} - for key, value := range schema { - orderedSchema[key] = value - } - - // TODO: Remove with the next major version. - if adjustedName == "ip_filter" || (isOneOf && adjustedName == "namespaces") { - schema[jen.Id("Deprecated")] = jen.Lit( - fmt.Sprintf("This will be removed in v5.0.0 and replaced with %s_string instead.", adjustedName), - ) - } - - result[adjustedName] = jen.Values(schema) - - if adjustedName == "ip_filter" || (isOneOf && adjustedName == "namespaces") { - result[fmt.Sprintf("%s_string", adjustedName)] = jen.Values(orderedSchema) - } - } - - return result, nil -} - -// handleAggregateTypeProperty is a function that converts an aggregate type property to a Terraform schema. -func handleAggregateTypeProperty( - propertyName string, - propertyAttributes map[string]any, - terraformType string, - aivenType string, -) (map[string]*jen.Statement, error) { - resultStatements := make(map[string]*jen.Statement) - - requiredProperties := map[string]struct{}{} - - if requiredSlice, ok := propertyAttributes["required"].([]any); ok { - requiredProperties = SliceToKeyedMap(requiredSlice) - } - - switch aivenType { - case "object": - objectStatements, err := handleObjectProperty( - propertyName, propertyAttributes, terraformType, requiredProperties, - ) - if err != nil { - return nil, err - } - - maps.Copy(resultStatements, objectStatements) - case "array": - arrayStatements, err := handleArrayProperty(propertyName, propertyAttributes, terraformType) - if err != nil { - return nil, err - } - - maps.Copy(resultStatements, arrayStatements) - default: - return nil, fmt.Errorf("unknown aggregate type: %s", aivenType) - } - - return resultStatements, nil -} diff --git a/internal/schemautil/userconfig/userconfig_test.go b/internal/schemautil/userconfig/userconfig_test.go deleted file mode 100644 index 783710e24..000000000 --- a/internal/schemautil/userconfig/userconfig_test.go +++ /dev/null @@ -1,128 +0,0 @@ -//go:build userconfig - -package userconfig - -import ( - "fmt" - "testing" - - "github.com/aiven/go-api-schemas/pkg/dist" - "github.com/dave/jennifer/jen" - "github.com/ettle/strcase" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" -) - -// generateSchema generates Terraform schema from a map representation of the schema. 
-func generateSchema(schemaName string, schemaMap map[string]any) error { - schemaNamePlural := fmt.Sprintf("%ss", schemaName) - - file := jen.NewFile("dist") - - file.HeaderComment("Code generated by internal/schemautil/userconfig/userconfig_test.go; DO NOT EDIT.") - - sortedMapKeys := maps.Keys(schemaMap) - slices.Sort(sortedMapKeys) - - for _, key := range sortedMapKeys { - value := schemaMap[key] - - keyPascalCase := strcase.ToGoPascal(key) - - valueAsserted, ok := value.(map[string]any) - if !ok { - continue - } - - properties, ok := valueAsserted["properties"].(map[string]any) - if !ok { - continue - } - - functionName := fmt.Sprintf("%s%s", schemaName, keyPascalCase) - - file.Commentf( - "%s is a generated function returning the schema of the %s %s.", functionName, key, schemaName, - ) - - required := map[string]struct{}{} - - if requiredSlice, ok := valueAsserted["required"].([]any); ok { - required = SliceToKeyedMap(requiredSlice) - } - - propertiesMap, err := convertPropertiesToSchemaMap(properties, required) - if err != nil { - return err - } - - file. - Func(). - Id(functionName). - Params(). - Id("*schema.Schema"). - Block( - jen.Id("s").Op(":=").Map(jen.String()).Op("*").Qual( - SchemaPackage, "Schema", - ).Values(propertiesMap), - - jen.Line(), - - jen.Return( - jen.Op("&").Qual(SchemaPackage, "Schema").Values(jen.Dict{ - jen.Id("Type"): jen.Qual(SchemaPackage, "TypeList"), - jen.Id("Description"): jen.Lit(fmt.Sprintf( - "%s user configurable settings", keyPascalCase, - )), - jen.Id("Elem"): jen.Op("&").Qual(SchemaPackage, "Resource"). - Values(jen.Dict{jen.Id("Schema"): jen.Id("s")}), - jen.Id("Optional"): jen.Lit(true), - jen.Id("DiffSuppressFunc"): jen.Qual( - SchemaUtilPackage, "EmptyObjectDiffSuppressFuncSkipArrays", - ).Call(jen.Id("s")), - jen.Id("MaxItems"): jen.Lit(1), - }), - ), - ). - Line() - } - - if err := file.Save(fmt.Sprintf("dist/%s.go", strcase.ToSnake(schemaNamePlural))); err != nil { - return err - } - - return nil -} - -// TestMain is the entry point for the user config schema generator. -func TestMain(m *testing.M) { - serviceTypesMap, err := representationToMap(ServiceTypes, dist.ServiceTypes) - if err != nil { - panic(err) - } - - err = generateSchema("ServiceType", serviceTypesMap) - if err != nil { - panic(err) - } - - integrationTypesMap, err := representationToMap(IntegrationTypes, dist.IntegrationTypes) - if err != nil { - panic(err) - } - - err = generateSchema("IntegrationType", integrationTypesMap) - if err != nil { - panic(err) - } - - integrationEndpointTypesMap, err := representationToMap(IntegrationEndpointTypes, dist.IntegrationEndpointTypes) - if err != nil { - panic(err) - } - - err = generateSchema("IntegrationEndpointType", integrationEndpointTypesMap) - if err != nil { - panic(err) - } -} diff --git a/internal/schemautil/userconfig/util.go b/internal/schemautil/userconfig/util.go deleted file mode 100644 index e616e7ada..000000000 --- a/internal/schemautil/userconfig/util.go +++ /dev/null @@ -1,251 +0,0 @@ -//nolint:unused -package userconfig - -import ( - "fmt" - "regexp" - "strings" - "sync" - - "github.com/aiven/go-api-schemas/pkg/dist" - "gopkg.in/yaml.v3" -) - -// SchemaType represents a custom type for Terraform schema. -type SchemaType int - -const ( - // ServiceTypes represents the service schema type. - ServiceTypes SchemaType = iota - - // IntegrationTypes represents the integration schema type. - IntegrationTypes - - // IntegrationEndpointTypes represents the integration endpoint schema type. 
- IntegrationEndpointTypes -) - -var ( - // cachedRepresentationMaps is a map of cached representation maps. - cachedRepresentationMaps = make(map[SchemaType]map[string]any, 3) - - // cachedRepresentationMapsMutex is a mutex for the cached representation maps. - cachedRepresentationMapsMutex = sync.Mutex{} - - // typeSuffixRegExp is a regular expression that matches type suffixes. - typeSuffixRegExp = regexp.MustCompile(`^.*_(boolean|integer|number|string|array|object)$`) -) - -// CachedRepresentationMap returns a cached representation map for a given schema type. -func CachedRepresentationMap(schemaType SchemaType) (map[string]any, error) { - if _, ok := map[SchemaType]struct{}{ - ServiceTypes: {}, - IntegrationTypes: {}, - IntegrationEndpointTypes: {}, - }[schemaType]; !ok { - return nil, fmt.Errorf("unknown schema type: %d", schemaType) - } - - switch schemaType { - case ServiceTypes: - return representationToMap(schemaType, dist.ServiceTypes) - case IntegrationTypes: - return representationToMap(schemaType, dist.IntegrationTypes) - case IntegrationEndpointTypes: - return representationToMap(schemaType, dist.IntegrationEndpointTypes) - default: - return nil, fmt.Errorf("unknown schema type %d", schemaType) - } -} - -// representationToMap converts a YAML representation of a Terraform schema to a map. -func representationToMap(schemaType SchemaType, representation []byte) (map[string]any, error) { - cachedRepresentationMapsMutex.Lock() - defer cachedRepresentationMapsMutex.Unlock() - - if cachedMap, ok := cachedRepresentationMaps[schemaType]; ok { - return cachedMap, nil - } - - var mapRepresentation map[string]any - if err := yaml.Unmarshal(representation, &mapRepresentation); err != nil { - return nil, err - } - - cachedRepresentationMaps[schemaType] = mapRepresentation - return mapRepresentation, nil -} - -// TerraformTypes converts schema representation types to Terraform types. -func TerraformTypes(types []string) ([]string, []string, error) { - var terraformTypes, aivenTypes []string // nolint:prealloc - - for _, typeValue := range types { - switch typeValue { - case "null": - // TODO: Handle this case. - // This is a special case where the value can be null. - // There should be a default value set for this case. - continue - case "boolean": - terraformTypes = append(terraformTypes, "TypeBool") - case "integer": - terraformTypes = append(terraformTypes, "TypeInt") - case "number": - terraformTypes = append(terraformTypes, "TypeFloat") - case "string": - terraformTypes = append(terraformTypes, "TypeString") - case "array", "object": - terraformTypes = append(terraformTypes, "TypeList") - default: - return nil, nil, fmt.Errorf("unknown type: %s", typeValue) - } - - aivenTypes = append(aivenTypes, typeValue) - } - - return terraformTypes, aivenTypes, nil -} - -// isTerraformTypePrimitive checks if a Terraform type is a primitive type. -func isTerraformTypePrimitive(terraformType string) bool { - switch terraformType { - case "TypeBool", "TypeInt", "TypeFloat", "TypeString": - return true - default: - return false - } -} - -// mustStringSlice converts an interface to a slice of strings. 
-func mustStringSlice(value any) ([]string, error) { - valueAsSlice, ok := value.([]any) - if !ok { - return nil, fmt.Errorf("not a slice: %#v", value) - } - - stringSlice := make([]string, len(valueAsSlice)) - - for index, value := range valueAsSlice { - stringValue, ok := value.(string) - if !ok { - return nil, fmt.Errorf("value is not a string: %#v", value) - } - - stringSlice[index] = stringValue - } - - return stringSlice, nil -} - -// SlicedString accepts a string or a slice of strings and returns a slice of strings. -func SlicedString(value any) []string { - valueAsSlice, ok := value.([]any) - if ok { - stringSlice, err := mustStringSlice(valueAsSlice) - if err != nil { - panic(err) - } - - return stringSlice - } - - valueAsString, ok := value.(string) - if !ok { - panic(fmt.Sprintf("value is not a string or a slice of strings: %#v", value)) - } - - return []string{valueAsString} -} - -// constDescriptionReplaceables is a map of strings that are replaced in descriptions. -var constDescriptionReplaceables = map[string]string{ - "DEPRECATED: ": "", - "This setting is deprecated. ": "", - "[seconds]": "(seconds)", -} - -// descriptionForProperty returns the description for a property. -func descriptionForProperty( - property map[string]any, - terraformType string, -) (isDeprecated bool, description string) { - if descriptionValue, ok := property["description"].(string); ok { - description = descriptionValue - } else if title, ok := property["title"].(string); ok { - description = title - } - - isDeprecated = strings.Contains(strings.ToLower(description), "deprecated") - - // shouldCapitalize is a flag indicating if the first letter should be capitalized. - shouldCapitalize := false - - // Some descriptions have a built-in deprecation notice, so we need to remove it. - for old, new := range constDescriptionReplaceables { - previousDescription := description - - description = strings.ReplaceAll(description, old, new) - - if previousDescription != description { - shouldCapitalize = true - } - } - - descriptionBuilder := Desc(description) - - if shouldCapitalize { - descriptionBuilder = descriptionBuilder.ForceFirstLetterCapitalization() - } - - if defaultValue, ok := property["default"]; ok && isTerraformTypePrimitive(terraformType) { - skipDefaultValue := false - - if defaultValueAsString, ok := defaultValue.(string); ok { - if defaultValueAsString == "" { - skipDefaultValue = true - } - } - - if !skipDefaultValue { - descriptionBuilder = descriptionBuilder.DefaultValue(defaultValue) - } - } - - description = descriptionBuilder.Build() - - return isDeprecated, description -} - -// EncodeKey encodes a key for a Terraform schema. -func EncodeKey(key string) string { - return strings.ReplaceAll(key, ".", "__dot__") -} - -// DecodeKey decodes a key for a Terraform schema. -func DecodeKey(key string) string { - return strings.ReplaceAll(key, "__dot__", ".") -} - -// IsKeyTyped checks if a key is typed, i.e., has a type suffix in it. -func IsKeyTyped(key string) bool { - return typeSuffixRegExp.MatchString(key) -} - -// SliceToKeyedMap converts a slice of any type to a map with keys of type string. -// It expects that all elements in the slice are of type string, otherwise it will panic. -// The values in the map are of type struct{} to minimize memory usage, as we are only interested in the keys. -func SliceToKeyedMap(slice []any) map[string]struct{} { - // Initialize an empty map with string keys and struct{} values. 
- result := make(map[string]struct{}) - - // Iterate through each element in the slice. - for _, value := range slice { - // Assert that the element is of type string, and then use it as a key in the map. - // The value associated with each key is an empty struct{}. - result[value.(string)] = struct{}{} - } - - // Return the resulting map. - return result -}
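For reference, the behavior of a few of the removed userconfig helpers can be summarized with a small, self-contained Go sketch. This is reconstructed from the deleted code above for illustration only; the lowercased names and the main function are local to this example and do not reflect any replacement API in the provider:

package main

import (
	"fmt"
	"strings"
)

// encodeKey/decodeKey mirror the removed EncodeKey/DecodeKey helpers: a "."
// in a key is round-tripped through the "__dot__" placeholder so the key can
// be used as a Terraform schema attribute name.
func encodeKey(key string) string { return strings.ReplaceAll(key, ".", "__dot__") }
func decodeKey(key string) string { return strings.ReplaceAll(key, "__dot__", ".") }

// sliceToKeyedMap mirrors the removed SliceToKeyedMap helper: it converts a
// []any of strings into a set-like map, as used for "required" property lookups.
func sliceToKeyedMap(slice []any) map[string]struct{} {
	result := make(map[string]struct{}, len(slice))
	for _, v := range slice {
		result[v.(string)] = struct{}{}
	}
	return result
}

func main() {
	encoded := encodeKey("datadog.api_key")
	fmt.Println(encoded)            // datadog__dot__api_key
	fmt.Println(decodeKey(encoded)) // datadog.api_key

	required := sliceToKeyedMap([]any{"admin_username", "admin_password"})
	_, ok := required["admin_username"]
	fmt.Println(ok) // true
}

The key "datadog.api_key" is a hypothetical example; any dotted key round-trips the same way.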