diff --git a/.golangci.yaml b/.golangci.yaml index 3136f03..5ba24f8 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -25,7 +25,6 @@ linters: - nakedret - nestif - nilerr - - nlreturn - prealloc - revive - staticcheck @@ -33,7 +32,6 @@ linters: - unconvert - unused - whitespace - - wsl linters-settings: gocognit: diff --git a/cmd/root.go b/cmd/root.go index 59e3f11..776413c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -139,7 +139,7 @@ func run(cmd *cobra.Command, _ []string) { logger.Info.Println("diffing") - dr, err := diff.Run(ctx, logger, gr, rr) + dr, err := diff.Run(rr, gr) if err != nil { logger.Error.Fatalf("error diffing: %s", err) } diff --git a/go.mod b/go.mod index 52ea15b..0076814 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/mitchellh/copystructure v1.2.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 golang.org/x/net v0.30.0 golang.org/x/sync v0.8.0 @@ -15,9 +16,11 @@ require ( ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect ) diff --git a/internal/convert/convert_test.go b/internal/convert/convert_test.go index 345a546..4a72e71 100644 --- a/internal/convert/convert_test.go +++ b/internal/convert/convert_test.go @@ -451,7 +451,7 @@ func TestUserConfigSchema(t *testing.T) { } if !cmp.Equal(got, tt.want) { - t.Errorf(cmp.Diff(tt.want, got)) + t.Error(cmp.Diff(tt.want, got)) } }) } diff --git a/internal/diff/diff.go b/internal/diff/diff.go index 3da9f95..44e3b43 100644 --- a/internal/diff/diff.go +++ b/internal/diff/diff.go @@ -1,312 +1,140 @@ -// Package diff is the package that contains the diff 
functionality. +// Package diff is the package that contains the diffMaps functionality. package diff import ( - "context" "fmt" - "golang.org/x/exp/maps" - "golang.org/x/sync/errgroup" + "golang.org/x/exp/slices" "github.com/aiven/go-api-schemas/internal/pkg/types" - "github.com/aiven/go-api-schemas/internal/pkg/util" ) -// logger is a pointer to the logger. -var logger *util.Logger - -// genResult is the result of the generation process. -var genResult types.GenerationResult - -// readResult is the result of the read process. -var readResult types.ReadResult +// Run runs the diffMaps. +func Run(was types.ReadResult, have types.GenerationResult) (types.DiffResult, error) { + result := make(types.DiffResult) + for _, k := range types.GetTypeKeys() { + result[k] = diffMaps(was[k], have[k]) + } -// result is the result of the diff process. -var result types.DiffResult + return result, nil +} -// diff is a function that diffs two maps. -// nolint:funlen,nestif,gocognit,gocyclo // This function is long, but it's not complex. -// // This function is nested, but it's not complex. -// // This function is complex, but it's a diff function. 
-func diff( - gen map[string]types.UserConfigSchema, - read map[string]types.UserConfigSchema, -) (map[string]types.UserConfigSchema, error) { - if len(read) == 0 { - return gen, nil +func diffTwo(was, have *types.UserConfigSchema) *types.UserConfigSchema { + switch { + case was == nil: + return have + case have == nil: + was.Deprecate("This property is deprecated.") + return was } - resultSchema := map[string]types.UserConfigSchema{} + // Properties + have.Properties = diffMaps(was.Properties, have.Properties) + have.Items = diffTwo(was.Items, have.Items) + have.OneOf = diffArrays(was.OneOf, have.OneOf) + have.Enum = diffEnums(was.Enum, have.Enum) + return have +} - for k, v := range read { - nv := v +func diffEnums(was, have []types.UserConfigSchemaEnumValue) []types.UserConfigSchemaEnumValue { + r := make(map[string]types.UserConfigSchemaEnumValue) + for _, v := range have { + r[stringify(v.Value)] = v + } - d, err := diff(gen[k].Properties, nv.Properties) - if err != nil { - return nil, err + for _, v := range was { + k := stringify(v.Value) + if _, ok := r[k]; !ok { + v.Deprecate("This value is deprecated.") + r[k] = v } + } - nv.Properties = d - - if nv.Items != nil && gen[k].Items != nil { - nv.Items.Title = gen[k].Items.Title - - nv.Items.Description = gen[k].Items.Description - - nv.Items.Type = gen[k].Items.Type - - nv.Items.Required = gen[k].Items.Required - - d, err = diff(gen[k].Items.Properties, nv.Items.Properties) - if err != nil { - return nil, err - } - - nv.Items.Properties = d - - if len(nv.Items.OneOf) != 0 { - for kn, vn := range nv.Items.OneOf { - if len(gen[k].Items.OneOf) > kn { - d, err = diff(gen[k].Items.OneOf[kn].Properties, vn.Properties) - if err != nil { - return nil, err - } - - nv.Items.OneOf[kn].Properties = d - } - - genExists := false - - for _, vg := range gen[k].Items.OneOf { - if vn.Title == vg.Title { - genExists = true - - break - } - } - - if !genExists { - nv.Items.OneOf[kn].IsDeprecated = true - - if 
nv.Items.OneOf[kn].DeprecationNotice == "" { - nv.Items.OneOf[kn].DeprecationNotice = "This item is deprecated." - } - } - } - - if len(gen[k].Items.OneOf) != 0 { - for _, vn := range gen[k].Items.OneOf { - readExists := false - - for k, vr := range nv.Items.OneOf { - if vn.Title == vr.Title { - nv.Items.OneOf[k].Description = vn.Description - - nv.Items.OneOf[k].Type = vn.Type - - nv.Items.OneOf[k].Required = vn.Required - - nv.Items.OneOf[k].MaxLength = vn.MaxLength - - nv.Items.OneOf[k].Pattern = vn.Pattern - - nv.Items.OneOf[k].Example = vn.Example - - readExists = true + return mapValues(r) +} - break - } - } +func diffArrays(was []types.UserConfigSchema, have []types.UserConfigSchema) []types.UserConfigSchema { + r := make(map[string]types.UserConfigSchema) + for _, v := range have { + r[stringify(v.Type)] = v + } - if !readExists { - nv.Items.OneOf = append(nv.Items.OneOf, vn) - } - } - } - } + for _, w := range was { + k := stringify(w.Type) + h, ok := r[k] + if !ok { + w.Deprecate("This item is deprecated.") + r[k] = w + continue } - for kn, vn := range nv.Enum { - genExists := false - - vnv := fmt.Sprintf("%v", vn.Value) - - for _, vg := range gen[k].Enum { - vgv := fmt.Sprintf("%v", vg.Value) - - if vnv == vgv { - genExists = true - - break - } - } - - if !genExists { - nv.Enum[kn].IsDeprecated = true - - if nv.Enum[kn].DeprecationNotice == "" { - nv.Enum[kn].DeprecationNotice = "This value is deprecated." - } - } - - for _, vn := range gen[k].Enum { - readExists := false - - vnv := fmt.Sprintf("%v", vn.Value) - - for _, vr := range nv.Enum { - vrv := fmt.Sprintf("%v", vr.Value) + r[k] = *diffTwo(&w, &h) + } - if vnv == vrv { - readExists = true + return mapValues(r) +} - break - } - } +// diffMaps returns the difference between the two maps. +// WARNING: Mutates the input maps. 
+func diffMaps(was, have map[string]types.UserConfigSchema) map[string]types.UserConfigSchema { + keys := mergeKeys(was, have) + if len(keys) == 0 { + return nil + } - if !readExists { - nv.Enum = append(nv.Enum, vn) - } - } + r := make(map[string]types.UserConfigSchema) + for _, k := range keys { + var w, h *types.UserConfigSchema + if v, ok := was[k]; ok { + w = &v } - if _, ok := gen[k]; !ok { - nv.IsDeprecated = true - - if nv.DeprecationNotice == "" { - nv.DeprecationNotice = "This property is deprecated." - } - } else { - nv.Title = gen[k].Title - - nv.Description = gen[k].Description - - nv.Type = gen[k].Type - - nv.Default = gen[k].Default - - nv.Required = gen[k].Required - - if len(nv.Properties) == 0 { - nv.Properties = gen[k].Properties - } - - if nv.Items == nil { - nv.Items = gen[k].Items - } - - if len(nv.OneOf) == 0 { - nv.OneOf = gen[k].OneOf - } - - if len(nv.Enum) == 0 { - nv.Enum = gen[k].Enum - } - - nv.Minimum = gen[k].Minimum - - nv.Maximum = gen[k].Maximum - - nv.MinLength = gen[k].MinLength - - nv.MaxLength = gen[k].MaxLength - - nv.MaxItems = gen[k].MaxItems - - nv.CreateOnly = gen[k].CreateOnly - - nv.Pattern = gen[k].Pattern - - nv.Example = gen[k].Example - - nv.UserError = gen[k].UserError - - nv.Secure = gen[k].Secure + if v, ok := have[k]; ok { + h = &v } - resultSchema[k] = nv - } - - kg := maps.Keys(gen) - - for _, k := range kg { - if _, ok := read[k]; !ok { - resultSchema[k] = gen[k] - } + r[k] = *diffTwo(w, h) } - return resultSchema, nil + return r } -// diffServiceTypes diffs the service types. -func diffServiceTypes() error { - defer util.MeasureExecutionTime(logger)() - - schema, err := diff(genResult[types.KeyServiceTypes], readResult[types.KeyServiceTypes]) - if err != nil { - return err - } - - result[types.KeyServiceTypes] = schema - - return nil +func stringify(v any) string { + return fmt.Sprintf("%v", v) } -// diffIntegrationTypes diffs the integration types. 
-func diffIntegrationTypes() error { - defer util.MeasureExecutionTime(logger)() - - schema, err := diff(genResult[types.KeyIntegrationTypes], readResult[types.KeyIntegrationTypes]) - if err != nil { - return err +// mergeKeys merges the keys of the given maps and returns them sorted. +func mergeKeys[T any](args ...map[string]T) []string { + if len(args) == 0 { + return nil } - result[types.KeyIntegrationTypes] = schema - - return nil -} - -func diffIntegrationEndpointTypes() error { - defer util.MeasureExecutionTime(logger)() - - schema, err := diff(genResult[types.KeyIntegrationEndpointTypes], readResult[types.KeyIntegrationEndpointTypes]) - if err != nil { - return err + seen := make(map[string]bool) + for _, m := range args { + for k := range m { + seen[k] = true + } } - result[types.KeyIntegrationEndpointTypes] = schema - - return nil -} - -// setup sets up the diff. -func setup(l *util.Logger, gr types.GenerationResult, rr types.ReadResult) { - logger = l - genResult = gr - readResult = rr + keys := make([]string, 0, len(seen)) + for k := range seen { + keys = append(keys, k) + } - result = types.DiffResult{} + slices.Sort(keys) + return keys } -// Run runs the diff. -func Run( - ctx context.Context, - logger *util.Logger, - genResult types.GenerationResult, - readResult types.ReadResult, -) (types.DiffResult, error) { - setup(logger, genResult, readResult) - - errs, _ := errgroup.WithContext(ctx) - - errs.Go(diffServiceTypes) - errs.Go(diffIntegrationTypes) - errs.Go(diffIntegrationEndpointTypes) +// mapValues returns the values of the given map sorted by the keys. 
+func mapValues[T any](m map[string]T) []T { + if len(m) == 0 { + return nil + } - err := errs.Wait() - if err != nil { - return nil, err + list := make([]T, 0, len(m)) + for _, k := range mergeKeys(m) { + list = append(list, m[k]) } - return result, nil + return list } diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go index aa9946d..b1dbb9b 100644 --- a/internal/diff/diff_test.go +++ b/internal/diff/diff_test.go @@ -1,16 +1,18 @@ -// Package diff is the package that contains the diff functionality. +// Package diff is the package that contains the diffMaps functionality. package diff import ( + "encoding/json" "testing" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" "github.com/aiven/go-api-schemas/internal/pkg/types" "github.com/aiven/go-api-schemas/internal/pkg/util" ) -// TestDiff tests the diff function. +// TestDiff tests the diff functions. // nolint:funlen,lll // This function is long, but it's a test function. // // These lines are long, but they're test data. 
func TestDiff(t *testing.T) { @@ -20,10 +22,9 @@ func TestDiff(t *testing.T) { } tests := []struct { - name string - args args - want map[string]types.UserConfigSchema - wantErr error + name string + args args + want map[string]types.UserConfigSchema }{ { name: "no read schema", @@ -74,7 +75,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "equal schemas", @@ -146,7 +146,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "different schemas", @@ -430,7 +429,6 @@ func TestDiff(t *testing.T) { UserError: "bar", }, }, - wantErr: nil, }, { name: "gen new property", @@ -605,7 +603,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "read new property", @@ -783,7 +780,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "gen new item property", @@ -1078,7 +1074,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "read new item property", @@ -1376,7 +1371,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "gen new item one of property", @@ -1409,7 +1403,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "qux", Description: "", - Type: nil, + Type: "string", Default: nil, Properties: nil, Items: nil, @@ -1429,7 +1423,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "quux", Description: "", - Type: nil, + Type: "object", Default: nil, Properties: nil, Items: nil, @@ -1512,7 +1506,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "qux", Description: "", - Type: nil, + Type: "string", Default: nil, Properties: nil, Items: nil, @@ -1594,9 +1588,9 @@ func TestDiff(t *testing.T) { OneOf: []types.UserConfigSchema{ { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, - Title: 
"qux", + Title: "quux", Description: "", - Type: nil, + Type: "object", Default: nil, Properties: nil, Items: nil, @@ -1614,9 +1608,9 @@ func TestDiff(t *testing.T) { }, { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, - Title: "quux", + Title: "qux", Description: "", - Type: nil, + Type: "string", Default: nil, Properties: nil, Items: nil, @@ -1671,7 +1665,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "read new item one of property", @@ -1704,7 +1697,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "qux", Description: "", - Type: nil, + Type: "string", Default: nil, Properties: nil, Items: nil, @@ -1787,7 +1780,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "qux", Description: "", - Type: nil, + Type: "string", Default: nil, Properties: nil, Items: nil, @@ -1807,7 +1800,7 @@ func TestDiff(t *testing.T) { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, Title: "quux", Description: "", - Type: nil, + Type: "object", Default: nil, Properties: nil, Items: nil, @@ -1887,26 +1880,6 @@ func TestDiff(t *testing.T) { Properties: nil, Items: nil, OneOf: []types.UserConfigSchema{ - { - UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, - Title: "qux", - Description: "", - Type: nil, - Default: nil, - Properties: nil, - Items: nil, - OneOf: nil, - Enum: nil, - Minimum: util.Ref(0.0), - Maximum: util.Ref(0.0), - MinLength: util.Ref(0), - MaxLength: util.Ref(0), - MaxItems: util.Ref(0), - CreateOnly: false, - Pattern: "", - Example: nil, - UserError: "", - }, { UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{ IsDeprecated: true, @@ -1914,7 +1887,7 @@ func TestDiff(t *testing.T) { }, Title: "quux", Description: "", - Type: nil, + Type: "object", Default: nil, Properties: nil, Items: nil, @@ -1930,6 +1903,26 @@ 
func TestDiff(t *testing.T) { Example: nil, UserError: "", }, + { + UserConfigSchemaDeprecationInfo: types.UserConfigSchemaDeprecationInfo{}, + Title: "qux", + Description: "", + Type: "string", + Default: nil, + Properties: nil, + Items: nil, + OneOf: nil, + Enum: nil, + Minimum: util.Ref(0.0), + Maximum: util.Ref(0.0), + MinLength: util.Ref(0), + MaxLength: util.Ref(0), + MaxItems: util.Ref(0), + CreateOnly: false, + Pattern: "", + Example: nil, + UserError: "", + }, }, Enum: nil, Minimum: util.Ref(0.0), @@ -1969,7 +1962,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "gen new enum value", @@ -2049,7 +2041,6 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, { name: "read new enum value", @@ -2135,22 +2126,15 @@ func TestDiff(t *testing.T) { UserError: "", }, }, - wantErr: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := diff(tt.args.g, tt.args.r) - if !cmp.Equal(err, tt.wantErr) { - t.Errorf("diff() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if !cmp.Equal(got, tt.want) { - t.Errorf("diff() = %v, want %v", got, tt.want) - } + got := diffMaps(tt.args.r, tt.args.g) + bGot, _ := json.MarshalIndent(&got, "", " ") + bWant, _ := json.MarshalIndent(&tt.want, "", " ") + assert.Empty(t, cmp.Diff(bWant, bGot)) }) } } diff --git a/internal/pkg/types/types.go b/internal/pkg/types/types.go index faefafa..cc0b5b9 100644 --- a/internal/pkg/types/types.go +++ b/internal/pkg/types/types.go @@ -7,6 +7,12 @@ type UserConfigSchemaDeprecationInfo struct { DeprecationNotice string `yaml:"deprecation_notice,omitempty"` } +// Deprecate sets the deprecation info for a user config schema entry. +func (u *UserConfigSchemaDeprecationInfo) Deprecate(msg string) { + u.IsDeprecated = true + u.DeprecationNotice = msg +} + // UserConfigSchemaEnumValue is a struct that contains the enum value for a user config schema entry. 
type UserConfigSchemaEnumValue struct { UserConfigSchemaDeprecationInfo `yaml:",inline"` @@ -58,3 +64,12 @@ const ( // KeyIntegrationEndpointTypes is the key for the integration endpoint types. KeyIntegrationEndpointTypes ) + +// GetTypeKeys returns the type keys. +func GetTypeKeys() []int { + return []int{ + KeyServiceTypes, + KeyIntegrationTypes, + KeyIntegrationEndpointTypes, + } +} diff --git a/pkg/dist/integration_endpoint_types.yml b/pkg/dist/integration_endpoint_types.yml index 92268a5..fe9353c 100644 --- a/pkg/dist/integration_endpoint_types.yml +++ b/pkg/dist/integration_endpoint_types.yml @@ -102,12 +102,12 @@ datadog: title: Datadog intake site. Defaults to datadoghq.com type: string enum: + - value: ap1.datadoghq.com - value: datadoghq.com - value: datadoghq.eu + - value: ddog-gov.com - value: us3.datadoghq.com - value: us5.datadoghq.com - - value: ddog-gov.com - - value: ap1.datadoghq.com example: datadoghq.com external_aws_cloudwatch_logs: type: object @@ -359,9 +359,9 @@ external_kafka: type: string enum: - value: PLAINTEXT - - value: SSL - value: SASL_PLAINTEXT - value: SASL_SSL + - value: SSL example: PLAINTEXT ssl_ca_cert: title: PEM-encoded CA certificate @@ -550,10 +550,10 @@ external_postgresql: enum: - is_deprecated: true deprecation_notice: This value is deprecated. - value: disable + value: allow - is_deprecated: true deprecation_notice: This value is deprecated. - value: allow + value: disable - is_deprecated: true deprecation_notice: This value is deprecated. 
value: prefer @@ -608,8 +608,8 @@ external_schema_registry: title: Authentication method type: string enum: - - value: none - value: basic + - value: none example: basic basic_auth_password: title: Basic authentication password @@ -700,9 +700,9 @@ rsyslog: type: string default: rfc5424 enum: - - value: rfc5424 - - value: rfc3164 - value: custom + - value: rfc3164 + - value: rfc5424 example: rfc5424 key: title: PEM encoded client key diff --git a/pkg/dist/integration_types.yml b/pkg/dist/integration_types.yml index 42bae34..c30b505 100644 --- a/pkg/dist/integration_types.yml +++ b/pkg/dist/integration_types.yml @@ -27,12 +27,12 @@ clickhouse_kafka: type: string default: earliest enum: - - value: smallest - - value: earliest - value: beginning + - value: earliest + - value: end - value: largest - value: latest - - value: end + - value: smallest example: latest columns: title: Table columns @@ -63,6 +63,7 @@ clickhouse_kafka: default: JSONEachRow enum: - value: Avro + - value: AvroConfluent - value: CSV - value: JSONAsString - value: JSONCompactEachRow @@ -70,12 +71,11 @@ clickhouse_kafka: - value: JSONEachRow - value: JSONStringsEachRow - value: MsgPack + - value: Parquet + - value: RawBLOB - value: TSKV - value: TSV - value: TabSeparated - - value: RawBLOB - - value: AvroConfluent - - value: Parquet example: JSONEachRow date_time_input_format: title: Method to read DateTime from text input formats @@ -292,6 +292,10 @@ datadog: items: title: Metric name type: string + enum: + - value: kafka.log.log_end_offset + - value: kafka.log.log_size + - value: kafka.log.log_start_offset max_length: 1024 example: kafka.log.log_size max_items: 1024 @@ -359,8 +363,8 @@ external_aws_cloudwatch_logs: - value: HOSTNAME - value: PRIORITY - value: REALTIME_TIMESTAMP - - value: service_name - value: SYSTEMD_UNIT + - value: service_name max_items: 5 external_aws_cloudwatch_metrics: title: External AWS CloudWatch Metrics integration user config @@ -429,8 +433,8 @@ 
external_elasticsearch_logs: - value: HOSTNAME - value: PRIORITY - value: REALTIME_TIMESTAMP - - value: service_name - value: SYSTEMD_UNIT + - value: service_name max_items: 5 external_google_cloud_logging: title: Integration user config @@ -450,8 +454,8 @@ external_opensearch_logs: - value: HOSTNAME - value: PRIORITY - value: REALTIME_TIMESTAMP - - value: service_name - value: SYSTEMD_UNIT + - value: service_name max_items: 5 flink: title: Integration user config @@ -528,8 +532,8 @@ kafka_logs: - value: HOSTNAME - value: PRIORITY - value: REALTIME_TIMESTAMP - - value: service_name - value: SYSTEMD_UNIT + - value: service_name max_items: 5 kafka_mirrormaker: title: Integration user config @@ -592,10 +596,10 @@ kafka_mirrormaker: type: string enum: - value: gzip - - value: snappy - value: lz4 - - value: zstd - value: none + - value: snappy + - value: zstd producer_linger_ms: title: producer.linger.ms description: The linger time (ms) for waiting new data to arrive for publishing. @@ -638,8 +642,8 @@ logs: - value: HOSTNAME - value: PRIORITY - value: REALTIME_TIMESTAMP - - value: service_name - value: SYSTEMD_UNIT + - value: service_name max_items: 5 m3aggregator: title: Integration user config diff --git a/pkg/dist/service_types.yml b/pkg/dist/service_types.yml index 6cfc924..52e02b0 100644 --- a/pkg/dist/service_types.yml +++ b/pkg/dist/service_types.yml @@ -226,18 +226,18 @@ alloydbomni: description: Controls the amount of detail written in the server log for each message that is logged. type: string enum: - - value: TERSE - value: DEFAULT + - value: TERSE - value: VERBOSE log_line_prefix: title: log_line_prefix description: Choose from one of the available log formats. 
type: string enum: + - value: '''%m [%p] %q[user=%u,db=%d,app=%a] ''' + - value: '''%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ''' - value: '''pid=%p,user=%u,db=%d,app=%a,client=%h ''' - value: '''pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q ''' - - value: '''%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ''' - - value: '''%m [%p] %q[user=%u,db=%d,app=%a] ''' log_min_duration_statement: title: log_min_duration_statement description: Log statements that take more than this number of milliseconds to run, -1 disables @@ -356,8 +356,8 @@ alloydbomni: - string enum: - value: all - - value: top - value: none + - value: top temp_file_limit: title: temp_file_limit description: PostgreSQL temporary file limit in KiB, -1 for unlimited @@ -393,8 +393,8 @@ alloydbomni: type: string enum: - value: all - - value: pl - value: none + - value: pl track_io_timing: title: track_io_timing description: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms. @@ -462,9 +462,9 @@ alloydbomni: type: string default: transaction enum: - - value: transaction - value: session - value: statement + - value: transaction example: session autodb_pool_size: title: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. @@ -628,8 +628,8 @@ alloydbomni: title: Synchronous replication type. Note that the service plan also needs to support synchronous replication. type: string enum: - - value: quorum - value: "off" + - value: quorum example: "off" variant: title: Variant of the PostgreSQL service, may affect the features that are exposed by default @@ -651,8 +651,6 @@ cassandra: type: object properties: additional_backup_regions: - is_deprecated: true - deprecation_notice: This property is deprecated. 
title: Additional Cloud Regions for Backup Replication type: array items: @@ -723,10 +721,10 @@ cassandra: - string - "null" enum: - - value: "4" - is_deprecated: true deprecation_notice: This value is deprecated. value: "3" + - value: "4" - value: "4.1" pattern: ^[0-9]+(\.[0-9]+)?$ ip_filter: @@ -965,9 +963,9 @@ dragonfly: description: When persistence is 'rdb' or 'dfs', Dragonfly does RDB or DFS dumps every 10 minutes. Dumps are done according to the backup schedule for backup purposes. When persistence is 'off', no RDB/DFS dumps or backups are done, so data can be lost at any moment if the service is restarted for any reason, or if the service is powered off. Also, the service can't be forked. type: string enum: + - value: dfs - value: "off" - value: rdb - - value: dfs dragonfly_ssl: title: Require SSL to access Dragonfly type: boolean @@ -1861,8 +1859,8 @@ grafana: type: string enum: - value: alerting - - value: no_data - value: keep_state + - value: no_data - value: ok example: ok allow_embedding: @@ -2073,7 +2071,6 @@ grafana: title: Team ID type: integer minimum: 1 - maximum: 9.223372036854776e+18 example: "150" max_items: 50 auth_gitlab: @@ -2170,8 +2167,8 @@ grafana: type: string enum: - value: lax - - value: strict - value: none + - value: strict example: lax custom_domain: title: Custom domain @@ -2456,9 +2453,9 @@ grafana: title: Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS. type: string enum: - - value: OpportunisticStartTLS - value: MandatoryStartTLS - value: NoStartTLS + - value: OpportunisticStartTLS example: NoStartTLS username: title: Username for SMTP authentication @@ -2485,9 +2482,9 @@ grafana: title: Set role for new signups. 
Defaults to Viewer type: string enum: - - value: Viewer - value: Admin - value: Editor + - value: Viewer example: Viewer viewers_can_edit: title: Users with view-only permission can edit but not save dashboards @@ -2735,11 +2732,11 @@ kafka: type: string enum: - value: gzip - - value: snappy - value: lz4 - - value: zstd - - value: uncompressed - value: producer + - value: snappy + - value: uncompressed + - value: zstd connections_max_idle_ms: title: connections.max.idle.ms description: 'Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes))' @@ -2803,9 +2800,9 @@ kafka: description: 'The default cleanup policy for segments beyond the retention window (Default: delete)' type: string enum: - - value: delete - value: compact - value: compact,delete + - value: delete example: delete log_flush_interval_messages: title: log.flush.interval.messages @@ -3034,8 +3031,8 @@ kafka: description: Defines what client configurations can be overridden by the connector. Default is None type: string enum: - - value: None - value: All + - value: None consumer_auto_offset_reset: title: Consumer auto offset reset description: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest @@ -3055,8 +3052,8 @@ kafka: description: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. type: string enum: - - value: read_uncommitted - value: read_committed + - value: read_uncommitted consumer_max_partition_fetch_bytes: title: The maximum amount of data per-partition the server will return. description: 'Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. 
' @@ -3112,10 +3109,10 @@ kafka: type: string enum: - value: gzip - - value: snappy - value: lz4 - - value: zstd - value: none + - value: snappy + - value: zstd producer_linger_ms: title: Wait for up to the given delay to allow batching records together description: 'This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will ''linger'' for the specified time waiting for more records to show up. Defaults to 0.' @@ -3260,8 +3257,8 @@ kafka: type: string default: topic_name enum: - - value: topic_name - value: record_name + - value: topic_name - value: topic_record_name name_strategy_validation: title: name.strategy.validation @@ -3274,20 +3271,20 @@ kafka: type: string default: "1" enum: - - value: all - value: "-1" - value: "0" - value: "1" + - value: all producer_compression_type: title: producer.compression.type description: Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression. type: string enum: - value: gzip - - value: snappy - value: lz4 - - value: zstd - value: none + - value: snappy + - value: zstd producer_linger_ms: title: producer.linger.ms description: Wait for up to the given delay to allow batching records together @@ -3333,13 +3330,13 @@ kafka: enum: - is_deprecated: true deprecation_notice: This value is deprecated. - value: "3.2" + value: "3.1" - is_deprecated: true deprecation_notice: This value is deprecated. - value: "3.3" + value: "3.2" - is_deprecated: true deprecation_notice: This value is deprecated. - value: "3.1" + value: "3.3" - is_deprecated: true deprecation_notice: This value is deprecated. 
value: "3.4" @@ -3560,8 +3557,8 @@ kafka_connect: description: Defines what client configurations can be overridden by the connector. Default is None type: string enum: - - value: None - value: All + - value: None consumer_auto_offset_reset: title: Consumer auto offset reset description: What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest @@ -3581,8 +3578,8 @@ kafka_connect: description: Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired. type: string enum: - - value: read_uncommitted - value: read_committed + - value: read_uncommitted consumer_max_partition_fetch_bytes: title: The maximum amount of data per-partition the server will return. description: 'Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. ' @@ -3638,10 +3635,10 @@ kafka_connect: type: string enum: - value: gzip - - value: snappy - value: lz4 - - value: zstd - value: none + - value: snappy + - value: zstd producer_linger_ms: title: Wait for up to the given delay to allow batching records together description: 'This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will ''linger'' for the specified time waiting for more records to show up. Defaults to 0.' 
@@ -4313,12 +4310,6 @@ m3db: type: array items: one_of: - - title: Namespaces matched by this glob - description: Filter the namespace by glob (=wildcards) - type: string - max_length: 256 - pattern: ^[a-zA-Z_0-9*]+$ - example: aggregated_* - title: Namespaces matching this storage policy description: Filter the namespace by exact match of retention period and resolution type: object @@ -4338,6 +4329,12 @@ m3db: max_length: 16 pattern: ^[0-9]+[smhd]$ example: 48h + - title: Namespaces matched by this glob + description: Filter the namespace by glob (=wildcards) + type: string + max_length: 256 + pattern: ^[a-zA-Z_0-9*]+$ + example: aggregated_* max_items: 10 tags: title: List of tags to be appended to matching metrics @@ -4652,8 +4649,8 @@ mysql: description: The storage engine for in-memory internal temporary tables. type: string enum: - - value: TempTable - value: MEMORY + - value: TempTable example: TempTable log_output: title: log_output @@ -4661,9 +4658,9 @@ mysql: type: string enum: - value: INSIGHTS + - value: INSIGHTS,TABLE - value: NONE - value: TABLE - - value: INSIGHTS,TABLE example: INSIGHTS long_query_time: title: long_query_time @@ -5519,6 +5516,7 @@ opensearch: - string - "null" max_length: 261 + pattern: ^[^\r\n]*$ example: anotherservice.aivencloud.com:12398 max_items: 32 script_max_compilations_rate: @@ -5537,9 +5535,9 @@ opensearch: description: The search backpressure mode. Valid values are monitor_only, enforced, or disabled. Default is monitor_only type: string enum: - - value: monitor_only - - value: enforced - value: disabled + - value: enforced + - value: monitor_only node_duress: title: Node duress settings type: object @@ -6338,17 +6336,17 @@ pg: description: Controls the amount of detail written in the server log for each message that is logged. type: string enum: - - value: TERSE - value: DEFAULT + - value: TERSE - value: VERBOSE log_line_prefix: title: log_line_prefix description: Choose from one of the available log formats. 
type: string enum: - - value: '''pid=%p,user=%u,db=%d,app=%a,client=%h ''' - - value: '''%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ''' - value: '''%m [%p] %q[user=%u,db=%d,app=%a] ''' + - value: '''%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ''' + - value: '''pid=%p,user=%u,db=%d,app=%a,client=%h ''' - value: '''pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q ''' log_min_duration_statement: title: log_min_duration_statement @@ -6480,8 +6478,8 @@ pg: - string enum: - value: all - - value: top - value: none + - value: top temp_file_limit: title: temp_file_limit description: PostgreSQL temporary file limit in KiB, -1 for unlimited @@ -6517,8 +6515,8 @@ pg: type: string enum: - value: all - - value: pl - value: none + - value: pl track_io_timing: title: track_io_timing description: Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms. @@ -6611,6 +6609,9 @@ pg: - string - "null" enum: + - is_deprecated: true + deprecation_notice: This value is deprecated. + value: "10" - is_deprecated: true deprecation_notice: This value is deprecated. value: "11" @@ -6618,9 +6619,6 @@ pg: - value: "13" - value: "14" - value: "15" - - is_deprecated: true - deprecation_notice: This value is deprecated. - value: "10" - value: "16" pgaudit: is_deprecated: true @@ -6780,8 +6778,8 @@ pg: default: transaction enum: - value: session - - value: transaction - value: statement + - value: transaction example: session autodb_pool_size: title: If non-zero then create automatically a pool of that size per user when a pool doesn't exist. @@ -6945,8 +6943,8 @@ pg: title: Synchronous replication type. Note that the service plan also needs to support synchronous replication. 
type: string enum: - - value: quorum - value: "off" + - value: quorum example: "off" timescaledb: title: TimescaleDB extension configuration values @@ -7175,14 +7173,14 @@ redis: - "null" default: noeviction enum: - - value: noeviction + - value: allkeys-lfu - value: allkeys-lru - - value: volatile-lru - value: allkeys-random + - value: noeviction + - value: volatile-lfu + - value: volatile-lru - value: volatile-random - value: volatile-ttl - - value: volatile-lfu - - value: allkeys-lfu redis_notify_keyspace_events: title: Set notify-keyspace-events option type: string @@ -7614,14 +7612,14 @@ valkey: - "null" default: noeviction enum: - - value: noeviction + - value: allkeys-lfu - value: allkeys-lru - - value: volatile-lru - value: allkeys-random + - value: noeviction + - value: volatile-lfu + - value: volatile-lru - value: volatile-random - value: volatile-ttl - - value: volatile-lfu - - value: allkeys-lfu valkey_notify_keyspace_events: title: Set notify-keyspace-events option type: string